diff --git a/test/expected/append-16.out b/test/expected/append-16.out new file mode 100644 index 00000000000..be3a4ccf1cc --- /dev/null +++ b/test/expected/append-16.out @@ -0,0 +1,2444 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set TEST_BASE_NAME append +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized results" --label "Optimized results" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +SET timescaledb.enable_now_constify TO false; +-- disable memoize node to make EXPLAIN output comparable between PG14 and previous versions +SELECT CASE WHEN current_setting('server_version_num')::int/10000 >= 14 THEN set_config('enable_memoize','off',false) ELSE 'off' END AS enable_memoize; + enable_memoize +---------------- + off +(1 row) + +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- create a now() function for repeatable testing that always returns +-- the same timestamp. It needs to be marked STABLE +CREATE OR REPLACE FUNCTION now_s() +RETURNS timestamptz LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RAISE NOTICE 'Stable function now_s() called!'; + RETURN '2017-08-22T10:00:00'::timestamptz; +END; +$BODY$; +CREATE OR REPLACE FUNCTION now_i() +RETURNS timestamptz LANGUAGE PLPGSQL IMMUTABLE AS +$BODY$ +BEGIN + RAISE NOTICE 'Immutable function now_i() called!'; + RETURN '2017-08-22T10:00:00'::timestamptz; +END; +$BODY$; +CREATE OR REPLACE FUNCTION now_v() +RETURNS timestamptz LANGUAGE PLPGSQL VOLATILE AS +$BODY$ +BEGIN + RAISE NOTICE 'Volatile function now_v() called!'; + RETURN '2017-08-22T10:00:00'::timestamptz; +END; +$BODY$; +CREATE TABLE append_test(time timestamptz, temp float, colorid integer, attr jsonb); +SELECT create_hypertable('append_test', 'time', chunk_time_interval => 2628000000000); +psql:include/append_load.sql:35: NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------- + (1,public,append_test,t) +(1 row) + +-- create three chunks +INSERT INTO append_test VALUES ('2017-03-22T09:18:22', 23.5, 1, '{"a": 1, "b": 2}'), + ('2017-03-22T09:18:23', 21.5, 1, '{"a": 1, "b": 2}'), + ('2017-05-22T09:18:22', 36.2, 2, '{"c": 3, "b": 2}'), + ('2017-05-22T09:18:23', 15.2, 2, '{"c": 3}'), + ('2017-08-22T09:18:22', 34.1, 3, '{"c": 4}'); +-- Create another hypertable to join with +CREATE TABLE join_test(time timestamptz, temp float, colorid integer); +SELECT create_hypertable('join_test', 'time', chunk_time_interval => 2628000000000); +psql:include/append_load.sql:46: NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------ + (2,public,join_test,t) +(1 row) + +INSERT INTO join_test VALUES ('2017-01-22T09:18:22', 15.2, 1), + ('2017-02-22T09:18:22', 24.5, 2), + ('2017-08-22T09:18:22', 23.1, 3); +-- Create another table to join 
with which is not a hypertable. +CREATE TABLE join_test_plain(time timestamptz, temp float, colorid integer, attr jsonb); +INSERT INTO join_test_plain VALUES ('2017-01-22T09:18:22', 15.2, 1, '{"a": 1}'), + ('2017-02-22T09:18:22', 24.5, 2, '{"b": 2}'), + ('2017-08-22T09:18:22', 23.1, 3, '{"c": 3}'); +-- create hypertable with DATE time dimension +CREATE TABLE metrics_date(time DATE NOT NULL); +SELECT create_hypertable('metrics_date','time'); + create_hypertable +--------------------------- + (3,public,metrics_date,t) +(1 row) + +INSERT INTO metrics_date SELECT generate_series('2000-01-01'::date, '2000-02-01'::date, '5m'::interval); +ANALYZE metrics_date; +-- create hypertable with TIMESTAMP time dimension +CREATE TABLE metrics_timestamp(time TIMESTAMP NOT NULL); +SELECT create_hypertable('metrics_timestamp','time'); +psql:include/append_load.sql:67: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +-------------------------------- + (4,public,metrics_timestamp,t) +(1 row) + +INSERT INTO metrics_timestamp SELECT generate_series('2000-01-01'::date, '2000-02-01'::date, '5m'::interval); +ANALYZE metrics_timestamp; +-- create hypertable with TIMESTAMPTZ time dimension +CREATE TABLE metrics_timestamptz(time TIMESTAMPTZ NOT NULL, device_id INT NOT NULL); +CREATE INDEX ON metrics_timestamptz(device_id,time); +SELECT create_hypertable('metrics_timestamptz','time'); + create_hypertable +---------------------------------- + (5,public,metrics_timestamptz,t) +(1 row) + +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::date, '2000-02-01'::date, '5m'::interval), 1; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::date, '2000-02-01'::date, '5m'::interval), 2; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::date, '2000-02-01'::date, '5m'::interval), 3; +ANALYZE metrics_timestamptz; +-- create space partitioned hypertable +CREATE TABLE metrics_space(time timestamptz NOT NULL, device_id int NOT NULL, v1 float, v2 float, v3 text); +SELECT create_hypertable('metrics_space','time','device_id',3); + create_hypertable +---------------------------- + (6,public,metrics_space,t) +(1 row) + +INSERT INTO metrics_space +SELECT time, device_id, device_id + 0.25, device_id + 0.75, device_id +FROM generate_series('2000-01-01'::timestamptz, '2000-01-14'::timestamptz, '5m'::interval) g1(time), + generate_series(1,10,1) g2(device_id) +ORDER BY time, device_id; +ANALYZE metrics_space; +-- test ChunkAppend projection #2661 +CREATE TABLE i2661 ( + machine_id int4 NOT NULL, + "name" varchar(255) NOT NULL, + "timestamp" timestamptz NOT NULL, + "first" float4 NULL +); +SELECT create_hypertable('i2661', 'timestamp'); +psql:include/append_load.sql:99: WARNING: column type "character varying" used for "name" does not follow best practices + create_hypertable +-------------------- + (7,public,i2661,t) +(1 row) + +INSERT INTO i2661 SELECT 1, 'speed', generate_series('2019-12-31 00:00:00', '2020-01-10 00:00:00', '2m'::interval), 0; +ANALYZE i2661; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+-- canary for results diff +-- this should be the only output of the results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations'),('timescaledb.enable_chunk_append')) v(setting); + setting | value +----------------------------------+------- + timescaledb.enable_optimizations | on + timescaledb.enable_chunk_append | on +(2 rows) + +-- query should exclude all chunks with optimization on +:PREFIX +SELECT * FROM append_test WHERE time > now_s() + '1 month' +ORDER BY time DESC; +psql:include/append_query.sql:12: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:12: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:12: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:12: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:12: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:12: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:12: NOTICE: Stable function now_s() called! + QUERY PLAN +------------------------------------------------------------------ + Custom Scan (ChunkAppend) on append_test (actual rows=0 loops=1) + Order: append_test."time" DESC + Chunks excluded during startup: 3 +(3 rows) + +--query should exclude all chunks and be a MergeAppend +:PREFIX +SELECT * FROM append_test WHERE time > now_s() + '1 month' +ORDER BY time DESC limit 1; +psql:include/append_query.sql:17: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:17: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:17: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:17: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:17: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:17: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:17: NOTICE: Stable function now_s() called! + QUERY PLAN +------------------------------------------------------------------------ + Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on append_test (actual rows=0 loops=1) + Order: append_test."time" DESC + Chunks excluded during startup: 3 +(4 rows) + +-- when optimized, the plan should be a constraint-aware append and +-- cover only one chunk. It should be a backward index scan due to +-- descending index on time. Should also skip the main table, since it +-- cannot hold tuples +:PREFIX +SELECT * FROM append_test WHERE time > now_s() - interval '2 months'; +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:24: NOTICE: Stable function now_s() called! 
+ QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test (actual rows=1 loops=1) + Chunks excluded during startup: 2 + -> Index Scan using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" > (now_s() - '@ 2 mons'::interval)) +(4 rows) + +-- adding ORDER BY and LIMIT should turn the plan into an optimized +-- ordered append plan +:PREFIX +SELECT * FROM append_test WHERE time > now_s() - interval '2 months' +ORDER BY time LIMIT 3; +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:30: NOTICE: Stable function now_s() called! + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on append_test (actual rows=1 loops=1) + Order: append_test."time" + Chunks excluded during startup: 2 + -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" > (now_s() - '@ 2 mons'::interval)) +(6 rows) + +-- no optimized plan for queries with restrictions that can be +-- constified at planning time. Regular planning-time constraint +-- exclusion should occur. +:PREFIX +SELECT * FROM append_test WHERE time > now_i() - interval '2 months' +ORDER BY time; +psql:include/append_query.sql:37: NOTICE: Immutable function now_i() called! + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test (actual rows=1 loops=1) + Order: append_test."time" + Chunks excluded during startup: 2 + -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" > ('Tue Aug 22 10:00:00 2017 PDT'::timestamp with time zone - '@ 2 mons'::interval)) +(5 rows) + +-- currently, we cannot distinguish between stable and volatile +-- functions as far as applying our modified plan. However, volatile +-- function should not be pre-evaluated to constants, so no chunk +-- exclusion should occur. +:PREFIX +SELECT * FROM append_test WHERE time > now_v() - interval '2 months' +ORDER BY time; +psql:include/append_query.sql:45: NOTICE: Volatile function now_v() called! +psql:include/append_query.sql:45: NOTICE: Volatile function now_v() called! +psql:include/append_query.sql:45: NOTICE: Volatile function now_v() called! +psql:include/append_query.sql:45: NOTICE: Volatile function now_v() called! +psql:include/append_query.sql:45: NOTICE: Volatile function now_v() called! 
+ QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test (actual rows=1 loops=1) + Order: append_test."time" + Chunks excluded during startup: 0 + -> Index Scan Backward using _hyper_1_1_chunk_append_test_time_idx on _hyper_1_1_chunk (actual rows=0 loops=1) + Filter: ("time" > (now_v() - '@ 2 mons'::interval)) + Rows Removed by Filter: 2 + -> Index Scan Backward using _hyper_1_2_chunk_append_test_time_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: ("time" > (now_v() - '@ 2 mons'::interval)) + Rows Removed by Filter: 2 + -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Filter: ("time" > (now_v() - '@ 2 mons'::interval)) +(11 rows) + +-- prepared statement output should be the same regardless of +-- optimizations +PREPARE query_opt AS +SELECT * FROM append_test WHERE time > now_s() - interval '2 months' +ORDER BY time; +:PREFIX EXECUTE query_opt; +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:53: NOTICE: Stable function now_s() called! + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test (actual rows=1 loops=1) + Order: append_test."time" + Chunks excluded during startup: 2 + -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" > (now_s() - '@ 2 mons'::interval)) +(5 rows) + +DEALLOCATE query_opt; +-- aggregates should produce same output +:PREFIX +SELECT date_trunc('year', time) t, avg(temp) FROM append_test +WHERE time > now_s() - interval '4 months' +GROUP BY t +ORDER BY t DESC; +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:62: NOTICE: Stable function now_s() called! 
+ QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=1 loops=1) + Group Key: (date_trunc('year'::text, append_test."time")) + -> Result (actual rows=3 loops=1) + -> Custom Scan (ChunkAppend) on append_test (actual rows=3 loops=1) + Order: date_trunc('year'::text, append_test."time") DESC + Chunks excluded during startup: 1 + -> Index Scan using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" > (now_s() - '@ 4 mons'::interval)) + -> Index Scan using _hyper_1_2_chunk_append_test_time_idx on _hyper_1_2_chunk (actual rows=2 loops=1) + Index Cond: ("time" > (now_s() - '@ 4 mons'::interval)) +(10 rows) + +-- querying outside the time range should return nothing. This tests +-- that ConstraintAwareAppend can handle the case when an Append node +-- is turned into a Result node due to no children +:PREFIX +SELECT date_trunc('year', time) t, avg(temp) +FROM append_test +WHERE time < '2016-03-22' +AND date_part('dow', time) between 1 and 5 +GROUP BY t +ORDER BY t DESC; + QUERY PLAN +----------------------------------------------------------- + GroupAggregate (actual rows=0 loops=1) + Group Key: (date_trunc('year'::text, "time")) + -> Sort (actual rows=0 loops=1) + Sort Key: (date_trunc('year'::text, "time")) DESC + Sort Method: quicksort + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(7 rows) + +-- a parameterized query can safely constify params, so won't be +-- optimized by constraint-aware append since regular constraint +-- exclusion works just fine +PREPARE query_param AS +SELECT * FROM append_test WHERE time > $1 ORDER BY time; +:PREFIX +EXECUTE query_param(now_s() - interval '2 months'); +psql:include/append_query.sql:82: NOTICE: Stable function now_s() called! + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Thu Jun 22 10:00:00 2017 PDT'::timestamp with time zone) +(2 rows) + +DEALLOCATE query_param; +--test with cte +:PREFIX +WITH data AS ( + SELECT time_bucket(INTERVAL '30 day', TIME) AS btime, AVG(temp) AS VALUE + FROM append_test + WHERE + TIME > now_s() - INTERVAL '400 day' + AND colorid > 0 + GROUP BY btime +), +period AS ( + SELECT time_bucket(INTERVAL '30 day', TIME) AS btime + FROM GENERATE_SERIES('2017-03-22T01:01:01', '2017-08-23T01:01:01', INTERVAL '30 day') TIME + ) +SELECT period.btime, VALUE + FROM period + LEFT JOIN DATA USING (btime) + ORDER BY period.btime; +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:102: NOTICE: Stable function now_s() called! 
+ QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=6 loops=1) + Sort Key: (time_bucket('@ 30 days'::interval, "time"."time")) + Sort Method: quicksort + -> Hash Left Join (actual rows=6 loops=1) + Hash Cond: (time_bucket('@ 30 days'::interval, "time"."time") = data.btime) + -> Function Scan on generate_series "time" (actual rows=6 loops=1) + -> Hash (actual rows=3 loops=1) + Buckets: 1024 Batches: 1 + -> Subquery Scan on data (actual rows=3 loops=1) + -> HashAggregate (actual rows=3 loops=1) + Group Key: time_bucket('@ 30 days'::interval, append_test."time") + Batches: 1 + -> Result (actual rows=5 loops=1) + -> Custom Scan (ChunkAppend) on append_test (actual rows=5 loops=1) + Chunks excluded during startup: 0 + -> Index Scan Backward using _hyper_1_1_chunk_append_test_time_idx on _hyper_1_1_chunk (actual rows=2 loops=1) + Index Cond: ("time" > (now_s() - '@ 400 days'::interval)) + Filter: (colorid > 0) + -> Index Scan Backward using _hyper_1_2_chunk_append_test_time_idx on _hyper_1_2_chunk (actual rows=2 loops=1) + Index Cond: ("time" > (now_s() - '@ 400 days'::interval)) + Filter: (colorid > 0) + -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" > (now_s() - '@ 400 days'::interval)) + Filter: (colorid > 0) +(24 rows) + +WITH data AS ( + SELECT time_bucket(INTERVAL '30 day', TIME) AS btime, AVG(temp) AS VALUE + FROM append_test + WHERE + TIME > now_s() - INTERVAL '400 day' + AND colorid > 0 + GROUP BY btime +), +period AS ( + SELECT time_bucket(INTERVAL '30 day', TIME) AS btime + FROM GENERATE_SERIES('2017-03-22T01:01:01', '2017-08-23T01:01:01', INTERVAL '30 day') TIME + ) +SELECT period.btime, VALUE + FROM period + LEFT JOIN DATA USING (btime) + ORDER BY period.btime; +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:119: NOTICE: Stable function now_s() called! + btime | value +------------------------------+------- + Fri Mar 03 16:00:00 2017 PST | 22.5 + Sun Apr 02 17:00:00 2017 PDT | + Tue May 02 17:00:00 2017 PDT | 25.7 + Thu Jun 01 17:00:00 2017 PDT | + Sat Jul 01 17:00:00 2017 PDT | + Mon Jul 31 17:00:00 2017 PDT | 34.1 +(6 rows) + +-- force nested loop join with no materialization. This tests that the +-- inner ConstraintAwareScan supports resetting its scan for every +-- iteration of the outer relation loop +set enable_hashjoin = 'off'; +set enable_mergejoin = 'off'; +set enable_material = 'off'; +:PREFIX +SELECT * FROM append_test a INNER JOIN join_test j ON (a.colorid = j.colorid) +WHERE a.time > now_s() - interval '3 hours' AND j.time > now_s() - interval '3 hours'; +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! 
+psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! +psql:include/append_query.sql:130: NOTICE: Stable function now_s() called! + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=1 loops=1) + Join Filter: (a.colorid = j.colorid) + -> Custom Scan (ChunkAppend) on append_test a (actual rows=1 loops=1) + Chunks excluded during startup: 2 + -> Index Scan using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk a_1 (actual rows=1 loops=1) + Index Cond: ("time" > (now_s() - '@ 3 hours'::interval)) + -> Custom Scan (ChunkAppend) on join_test j (actual rows=1 loops=1) + Chunks excluded during startup: 2 + -> Index Scan using _hyper_2_6_chunk_join_test_time_idx on _hyper_2_6_chunk j_1 (actual rows=1 loops=1) + Index Cond: ("time" > (now_s() - '@ 3 hours'::interval)) +(10 rows) + +reset enable_hashjoin; +reset enable_mergejoin; +reset enable_material; +-- test constraint_exclusion with date time dimension and DATE/TIMESTAMP/TIMESTAMPTZ constraints +-- the queries should all have 3 chunks +:PREFIX SELECT * FROM metrics_date WHERE time > '2000-01-15'::date ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_date (actual rows=4609 loops=1) + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_3_11_chunk_metrics_date_time_idx on _hyper_3_11_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM metrics_date WHERE time > '2000-01-15'::timestamp ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_date (actual rows=4609 loops=1) + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + 
Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_3_11_chunk_metrics_date_time_idx on _hyper_3_11_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM metrics_date WHERE time > '2000-01-15'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_date (actual rows=4609 loops=1) + Order: metrics_date."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_3_11_chunk_metrics_date_time_idx on _hyper_3_11_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1441 +(12 rows) + +-- test Const OP Var +-- the queries should all have 3 chunks +:PREFIX SELECT * FROM metrics_date WHERE '2000-01-15'::date < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_date (actual rows=4609 loops=1) + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_3_11_chunk_metrics_date_time_idx on _hyper_3_11_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM metrics_date WHERE '2000-01-15'::timestamp < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_date (actual rows=4609 loops=1) + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_3_11_chunk_metrics_date_time_idx on _hyper_3_11_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM 
metrics_date WHERE '2000-01-15'::timestamptz < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_date (actual rows=4609 loops=1) + Order: metrics_date."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_3_11_chunk_metrics_date_time_idx on _hyper_3_11_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1441 +(12 rows) + +-- test 2 constraints +-- the queries should all have 2 chunks +:PREFIX SELECT * FROM metrics_date WHERE time > '2000-01-15'::date AND time < '2000-01-21'::date ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date (actual rows=1440 loops=1) + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: (("time" > '01-15-2000'::date) AND ("time" < '01-21-2000'::date)) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=288 loops=1) + Index Cond: (("time" > '01-15-2000'::date) AND ("time" < '01-21-2000'::date)) + Heap Fetches: 288 +(8 rows) + +:PREFIX SELECT * FROM metrics_date WHERE time > '2000-01-15'::timestamp AND time < '2000-01-21'::timestamp ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date (actual rows=1440 loops=1) + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=288 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 288 +(8 rows) + +:PREFIX SELECT * FROM metrics_date WHERE time > '2000-01-15'::timestamptz AND time < '2000-01-21'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date (actual rows=1440 loops=1) + Order: metrics_date."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_3_9_chunk_metrics_date_time_idx on _hyper_3_9_chunk (actual rows=1152 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with 
time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1152 + -> Index Only Scan Backward using _hyper_3_10_chunk_metrics_date_time_idx on _hyper_3_10_chunk (actual rows=288 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 288 +(9 rows) + +-- test constraint_exclusion with timestamp time dimension and DATE/TIMESTAMP/TIMESTAMPTZ constraints +-- the queries should all have 3 chunks +:PREFIX SELECT * FROM metrics_timestamp WHERE time > '2000-01-15'::date ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=4896 loops=1) + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_4_16_chunk_metrics_timestamp_time_idx on _hyper_4_16_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE time > '2000-01-15'::timestamp ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=4896 loops=1) + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_4_16_chunk_metrics_timestamp_time_idx on _hyper_4_16_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE time > '2000-01-15'::timestamptz ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=4896 loops=1) + Order: metrics_timestamp."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_4_16_chunk_metrics_timestamp_time_idx on _hyper_4_16_chunk (actual rows=1441 loops=1) + Index Cond: 
("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1441 +(12 rows) + +-- test Const OP Var +-- the queries should all have 3 chunks +:PREFIX SELECT * FROM metrics_timestamp WHERE '2000-01-15'::date < time ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=4896 loops=1) + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_4_16_chunk_metrics_timestamp_time_idx on _hyper_4_16_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE '2000-01-15'::timestamp < time ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=4896 loops=1) + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_4_16_chunk_metrics_timestamp_time_idx on _hyper_4_16_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1441 +(11 rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE '2000-01-15'::timestamptz < time ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=4896 loops=1) + Order: metrics_timestamp."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=2016 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 2016 + -> Index Only Scan Backward using _hyper_4_16_chunk_metrics_timestamp_time_idx on _hyper_4_16_chunk (actual rows=1441 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1441 +(12 rows) + +-- test 2 constraints +-- the queries should all have 2 chunks +:PREFIX SELECT * FROM metrics_timestamp WHERE time > '2000-01-15'::date AND time < '2000-01-21'::date ORDER BY time; + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=1727 loops=1) + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: (("time" > '01-15-2000'::date) AND ("time" < '01-21-2000'::date)) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=288 loops=1) + Index Cond: (("time" > '01-15-2000'::date) AND ("time" < '01-21-2000'::date)) + Heap Fetches: 288 +(8 rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE time > '2000-01-15'::timestamp AND time < '2000-01-21'::timestamp ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=1727 loops=1) + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=288 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 288 +(8 rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE time > '2000-01-15'::timestamptz AND time < '2000-01-21'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=1727 loops=1) + Order: metrics_timestamp."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_4_14_chunk_metrics_timestamp_time_idx on _hyper_4_14_chunk (actual rows=1439 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1439 + -> Index Only Scan Backward using _hyper_4_15_chunk_metrics_timestamp_time_idx on _hyper_4_15_chunk (actual rows=288 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 288 +(9 rows) + +-- test constraint_exclusion with timestamptz time dimension and DATE/TIMESTAMP/TIMESTAMPTZ constraints +-- the queries should all have 3 chunks +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > '2000-01-15'::date ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=14688 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 
4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=6048 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk (actual rows=4611 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 4611 +(12 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > '2000-01-15'::timestamp ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=14688 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=6048 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk (actual rows=4611 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 4611 +(12 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > '2000-01-15'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=14688 loops=1) + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=6048 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk (actual rows=4611 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 4611 +(11 rows) + +-- test Const OP Var +-- the queries should all have 3 chunks +:PREFIX SELECT time FROM metrics_timestamptz WHERE '2000-01-15'::date < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=14688 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=6048 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk 
(actual rows=4611 loops=1) + Index Cond: ("time" > '01-15-2000'::date) + Heap Fetches: 4611 +(12 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE '2000-01-15'::timestamp < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=14688 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 2 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=6048 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk (actual rows=4611 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 4611 +(12 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE '2000-01-15'::timestamptz < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=14688 loops=1) + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=6048 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk (actual rows=4611 loops=1) + Index Cond: ("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 4611 +(11 rows) + +-- test 2 constraints +-- the queries should all have 2 chunks +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > '2000-01-15'::date AND time < '2000-01-21'::date ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=5181 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: (("time" > '01-15-2000'::date) AND ("time" < '01-21-2000'::date)) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=1152 loops=1) + Index Cond: (("time" > '01-15-2000'::date) AND ("time" < '01-21-2000'::date)) + Heap Fetches: 1152 +(9 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > '2000-01-15'::timestamp AND time < '2000-01-21'::timestamp ORDER BY time; + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=5181 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=1152 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 1152 +(9 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > '2000-01-15'::timestamptz AND time < '2000-01-21'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=5181 loops=1) + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (actual rows=4029 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 4029 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (actual rows=1152 loops=1) + Index Cond: (("time" > 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 21 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1152 +(8 rows) + +-- test constraint_exclusion with space partitioning and DATE/TIMESTAMP/TIMESTAMPTZ constraints +-- exclusion for constraints with non-matching datatypes not working for space partitioning atm +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::date ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 0 + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) 
+ Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 770 +(35 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamp ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 0 + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 770 +(35 rows) + +:PREFIX 
SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 770 +(24 rows) + +-- test Const OP Var +-- exclusion for constraints with non-matching datatypes not working for space partitioning atm +:PREFIX SELECT time FROM metrics_space WHERE '2000-01-10'::date < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 0 + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) 
+ Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: ("time" > '01-10-2000'::date) + Heap Fetches: 770 +(35 rows) + +:PREFIX SELECT time FROM metrics_space WHERE '2000-01-10'::timestamp < time ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 0 + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) + Heap Fetches: 770 +(35 rows) + +:PREFIX SELECT time FROM metrics_space WHERE '2000-01-10'::timestamptz < time ORDER BY time; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 770 +(24 rows) + +-- test 2 constraints +-- exclusion for constraints with non-matching datatypes not working for space partitioning atm +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::date AND time < '2000-01-15'::date ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 0 + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 3068 + -> Index 
Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: (("time" > '01-10-2000'::date) AND ("time" < '01-15-2000'::date)) + Heap Fetches: 770 +(35 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamp AND time < '2000-01-15'::timestamp ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 0 + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + 
Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000'::timestamp without time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000'::timestamp without time zone)) + Heap Fetches: 770 +(35 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamptz AND time < '2000-01-15'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=11520 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=7670 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=3068 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=3068 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 3068 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=1534 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1534 + -> Merge Append (actual rows=3850 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=1540 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=1540 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1540 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=770 loops=1) + Index Cond: (("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Sat Jan 15 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 770 +(24 rows) + +-- test filtering on space partition +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamptz AND device_id = 1 ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=1152 
loops=1) + Order: metrics_space."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_device_id_time_idx on _hyper_6_25_chunk (actual rows=767 loops=1) + Index Cond: ((device_id = 1) AND ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 767 + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_device_id_time_idx on _hyper_6_28_chunk (actual rows=385 loops=1) + Index Cond: ((device_id = 1) AND ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 385 +(8 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamptz AND device_id IN (1,2) ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=2304 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1534 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=767 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 2301 + -> Index Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=767 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 2301 + -> Merge Append (actual rows=770 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=385 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 1155 + -> Index Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=385 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 1155 +(22 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamptz AND device_id IN (VALUES(1)) ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=1152 loops=1) + Order: metrics_space."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_device_id_time_idx on _hyper_6_25_chunk (actual rows=767 loops=1) + Index Cond: ((device_id = 1) AND ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 767 + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_device_id_time_idx on _hyper_6_28_chunk (actual rows=385 loops=1) + Index Cond: ((device_id = 1) AND ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 385 +(8 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > '2000-01-10'::timestamptz AND v3 IN (VALUES('1')) ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=1152 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=767 loops=1) + Sort Key: 
_hyper_6_25_chunk."time" + -> Index Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=767 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (v3 = '1'::text) + Rows Removed by Filter: 2301 + -> Index Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (v3 = '1'::text) + Rows Removed by Filter: 3068 + -> Index Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (v3 = '1'::text) + Rows Removed by Filter: 1534 + -> Merge Append (actual rows=385 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=385 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (v3 = '1'::text) + Rows Removed by Filter: 1155 + -> Index Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (v3 = '1'::text) + Rows Removed by Filter: 1540 + -> Index Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=0 loops=1) + Index Cond: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (v3 = '1'::text) + Rows Removed by Filter: 770 +(30 rows) + +:PREFIX SELECT * FROM metrics_space +WHERE time = (VALUES ('2019-12-24' at time zone 'UTC')) + AND v3 NOT IN (VALUES ('1')); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Chunks excluded during startup: 0 + Chunks excluded during runtime: 9 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Index Scan using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + SubPlan 2 + -> Result (never executed) + -> Index Scan using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + -> Index Scan using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + -> Index Scan using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + -> Index Scan using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + -> Index Scan using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + -> Index Scan using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + -> Index Scan using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (never executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) + -> Index Scan using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (never 
executed) + Index Cond: ("time" = $0) + Filter: (NOT (hashed SubPlan 2)) +(34 rows) + +-- test CURRENT_DATE +-- should be 0 chunks +:PREFIX SELECT time FROM metrics_date WHERE time > CURRENT_DATE ORDER BY time; + QUERY PLAN +------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date (actual rows=0 loops=1) + Order: metrics_date."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamp WHERE time > CURRENT_DATE ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=0 loops=1) + Order: metrics_timestamp."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > CURRENT_DATE ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=0 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > CURRENT_DATE ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_DATE) + Heap Fetches: 0 +(35 rows) + +-- test CURRENT_TIMESTAMP +-- should be 0 chunks +:PREFIX SELECT time FROM metrics_date WHERE time > CURRENT_TIMESTAMP ORDER BY time; + QUERY PLAN +------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date (actual rows=0 
loops=1) + Order: metrics_date."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamp WHERE time > CURRENT_TIMESTAMP ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=0 loops=1) + Order: metrics_timestamp."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > CURRENT_TIMESTAMP ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=0 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > CURRENT_TIMESTAMP ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=0 loops=1) + Index Cond: ("time" > CURRENT_TIMESTAMP) + Heap Fetches: 0 +(35 rows) + +-- test now() +-- should be 0 chunks +:PREFIX SELECT time FROM metrics_date WHERE time > now() ORDER BY time; + QUERY PLAN +------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date (actual rows=0 loops=1) + Order: metrics_date."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamp WHERE time > now() ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_timestamp (actual rows=0 
loops=1) + Order: metrics_timestamp."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time > now() ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=0 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 5 +(3 rows) + +:PREFIX SELECT time FROM metrics_space WHERE time > now() ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_22_chunk."time" + -> Index Only Scan Backward using _hyper_6_22_chunk_metrics_space_time_idx on _hyper_6_22_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_23_chunk_metrics_space_time_idx on _hyper_6_23_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_24_chunk_metrics_space_time_idx on _hyper_6_24_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_25_chunk."time" + -> Index Only Scan Backward using _hyper_6_25_chunk_metrics_space_time_idx on _hyper_6_25_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_26_chunk_metrics_space_time_idx on _hyper_6_26_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_27_chunk_metrics_space_time_idx on _hyper_6_27_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_6_28_chunk."time" + -> Index Only Scan Backward using _hyper_6_28_chunk_metrics_space_time_idx on _hyper_6_28_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_29_chunk_metrics_space_time_idx on _hyper_6_29_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_6_30_chunk_metrics_space_time_idx on _hyper_6_30_chunk (actual rows=0 loops=1) + Index Cond: ("time" > now()) + Heap Fetches: 0 +(35 rows) + +-- query with tablesample and planner exclusion +:PREFIX +SELECT * FROM metrics_date TABLESAMPLE BERNOULLI(5) REPEATABLE(0) +WHERE time > '2000-01-15' +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort (actual rows=217 loops=1) + Sort Key: _hyper_3_11_chunk."time" DESC + Sort Method: quicksort + -> Append (actual rows=217 loops=1) + -> Sample Scan on _hyper_3_11_chunk (actual rows=72 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > '01-15-2000'::date) + -> Sample Scan on _hyper_3_10_chunk (actual rows=94 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > '01-15-2000'::date) + -> Sample Scan on _hyper_3_9_chunk (actual rows=51 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > '01-15-2000'::date) + Rows Removed by Filter: 43 +(14 rows) + +-- query with tablesample and startup exclusion +:PREFIX 
+SELECT * FROM metrics_date TABLESAMPLE BERNOULLI(5) REPEATABLE(0) +WHERE time > '2000-01-15'::text::date +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort (actual rows=217 loops=1) + Sort Key: metrics_date."time" DESC + Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_date (actual rows=217 loops=1) + Chunks excluded during startup: 2 + -> Sample Scan on _hyper_3_11_chunk (actual rows=72 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > ('2000-01-15'::cstring)::date) + -> Sample Scan on _hyper_3_10_chunk (actual rows=94 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > ('2000-01-15'::cstring)::date) + -> Sample Scan on _hyper_3_9_chunk (actual rows=51 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > ('2000-01-15'::cstring)::date) + Rows Removed by Filter: 43 +(15 rows) + +-- query with tablesample, space partitioning and planner exclusion +:PREFIX +SELECT * FROM metrics_space TABLESAMPLE BERNOULLI(5) REPEATABLE(0) +WHERE time > '2000-01-10'::timestamptz +ORDER BY time DESC, device_id; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort (actual rows=522 loops=1) + Sort Key: _hyper_6_30_chunk."time" DESC, _hyper_6_30_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=522 loops=1) + -> Sample Scan on _hyper_6_30_chunk (actual rows=35 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sample Scan on _hyper_6_29_chunk (actual rows=61 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sample Scan on _hyper_6_28_chunk (actual rows=61 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sample Scan on _hyper_6_27_chunk (actual rows=65 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 113 + -> Sample Scan on _hyper_6_26_chunk (actual rows=150 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 218 + -> Sample Scan on _hyper_6_25_chunk (actual rows=150 loops=1) + Sampling: bernoulli ('5'::real) REPEATABLE ('0'::double precision) + Filter: ("time" > 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 218 +(25 rows) + +-- test runtime exclusion +-- test runtime exclusion with LATERAL and 2 hypertables +:PREFIX SELECT m1.time, m2.time FROM metrics_timestamptz m1 LEFT JOIN LATERAL(SELECT time FROM metrics_timestamptz m2 WHERE m1.time = m2.time LIMIT 1) m2 ON true ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop Left Join (actual rows=26787 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 (actual rows=26787 loops=1) + Order: m1."time" + -> Index Only Scan Backward using _hyper_5_17_chunk_metrics_timestamptz_time_idx on 
_hyper_5_17_chunk m1_1 (actual rows=4032 loops=1) + Heap Fetches: 4032 + -> Index Only Scan Backward using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m1_2 (actual rows=6048 loops=1) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m1_3 (actual rows=6048 loops=1) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m1_4 (actual rows=6048 loops=1) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m1_5 (actual rows=4611 loops=1) + Heap Fetches: 4611 + -> Limit (actual rows=1 loops=26787) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m2 (actual rows=1 loops=26787) + Chunks excluded during runtime: 4 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m2_1 (actual rows=1 loops=4032) + Index Cond: ("time" = m1."time") + Heap Fetches: 4032 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m2_2 (actual rows=1 loops=6048) + Index Cond: ("time" = m1."time") + Heap Fetches: 6048 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m2_3 (actual rows=1 loops=6048) + Index Cond: ("time" = m1."time") + Heap Fetches: 6048 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m2_4 (actual rows=1 loops=6048) + Index Cond: ("time" = m1."time") + Heap Fetches: 6048 + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m2_5 (actual rows=1 loops=4611) + Index Cond: ("time" = m1."time") + Heap Fetches: 4611 +(31 rows) + +-- test runtime exclusion and startup exclusions +:PREFIX SELECT m1.time, m2.time FROM metrics_timestamptz m1 LEFT JOIN LATERAL(SELECT time FROM metrics_timestamptz m2 WHERE m1.time = m2.time AND m2.time < '2000-01-10'::text::timestamptz LIMIT 1) m2 ON true ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop Left Join (actual rows=26787 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 (actual rows=26787 loops=1) + Order: m1."time" + -> Index Only Scan Backward using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m1_1 (actual rows=4032 loops=1) + Heap Fetches: 4032 + -> Index Only Scan Backward using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m1_2 (actual rows=6048 loops=1) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m1_3 (actual rows=6048 loops=1) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m1_4 (actual rows=6048 loops=1) + Heap Fetches: 6048 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m1_5 (actual rows=4611 loops=1) + Heap Fetches: 4611 + -> Limit (actual rows=0 loops=26787) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m2 (actual rows=0 loops=26787) + Chunks excluded during startup: 3 + Chunks excluded during runtime: 1 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m2_1 (actual rows=1 loops=4032) + Index Cond: (("time" < ('2000-01-10'::cstring)::timestamp with 
time zone) AND ("time" = m1."time")) + Heap Fetches: 4032 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m2_2 (actual rows=1 loops=6048) + Index Cond: (("time" < ('2000-01-10'::cstring)::timestamp with time zone) AND ("time" = m1."time")) + Heap Fetches: 3744 +(23 rows) + +-- test runtime exclusion does not activate for constraints on non-partitioning columns +-- should not use runtime exclusion +:PREFIX SELECT * FROM append_test a LEFT JOIN LATERAL(SELECT * FROM join_test j WHERE a.colorid = j.colorid ORDER BY time DESC LIMIT 1) j ON true ORDER BY a.time LIMIT 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Nested Loop Left Join (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on append_test a (actual rows=1 loops=1) + Order: a."time" + -> Index Scan Backward using _hyper_1_1_chunk_append_test_time_idx on _hyper_1_1_chunk a_1 (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_append_test_time_idx on _hyper_1_2_chunk a_2 (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk a_3 (never executed) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on join_test j (actual rows=1 loops=1) + Order: j."time" DESC + Hypertables excluded during runtime: 0 + -> Index Scan using _hyper_2_6_chunk_join_test_time_idx on _hyper_2_6_chunk j_1 (actual rows=0 loops=1) + Filter: (a.colorid = colorid) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_2_5_chunk_join_test_time_idx on _hyper_2_5_chunk j_2 (actual rows=0 loops=1) + Filter: (a.colorid = colorid) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_2_4_chunk_join_test_time_idx on _hyper_2_4_chunk j_3 (actual rows=1 loops=1) + Filter: (a.colorid = colorid) +(19 rows) + +-- test runtime exclusion with LATERAL and generate_series +:PREFIX SELECT g.time FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g(time) LEFT JOIN LATERAL(SELECT time FROM metrics_timestamptz m WHERE m.time=g.time LIMIT 1) m ON true; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=32 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Result (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m (actual rows=1 loops=32) + Chunks excluded during runtime: 4 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m_1 (actual rows=1 loops=5) + Index Cond: ("time" = g."time") + Heap Fetches: 5 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m_2 (actual rows=1 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 7 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m_3 (actual rows=1 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 7 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m_4 (actual rows=1 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 7 + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m_5 (actual rows=1 loops=6) + Index Cond: ("time" = g."time") + Heap Fetches: 6 +(21 rows) 
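+
+-- Editor's illustrative sketch (not part of the captured test output): the
+-- LATERAL join above drives runtime chunk exclusion because the inner
+-- ChunkAppend is re-evaluated for every outer row, so per outer timestamp
+-- four of the five chunks are skipped ("Chunks excluded during runtime: 4").
+-- A minimal, hypothetical way to reproduce the same plan shape by hand
+-- (narrowed date range chosen here only for brevity) would be:
+--   EXPLAIN (analyze, costs off, timing off, summary off)
+--   SELECT g.time, m.time
+--   FROM generate_series('2000-01-01'::timestamptz,
+--                        '2000-01-05'::timestamptz, '1d'::interval) AS g(time)
+--   LEFT JOIN LATERAL (SELECT time FROM metrics_timestamptz m
+--                      WHERE m.time = g.time LIMIT 1) m ON true;
+-- Each chunk's index scan should then report actual rows only for the
+-- timestamps that fall inside its range, with the remaining chunks counted
+-- under "Chunks excluded during runtime".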
+ +:PREFIX SELECT * FROM generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval) AS g(time) INNER JOIN LATERAL (SELECT time FROM metrics_timestamptz m WHERE time=g.time) m ON true; + QUERY PLAN +-------------------------------------------------------------------------------- + Hash Join (actual rows=96 loops=1) + Hash Cond: (g."time" = m_1."time") + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Hash (actual rows=26787 loops=1) + Buckets: 32768 Batches: 1 + -> Append (actual rows=26787 loops=1) + -> Seq Scan on _hyper_5_17_chunk m_1 (actual rows=4032 loops=1) + -> Seq Scan on _hyper_5_18_chunk m_2 (actual rows=6048 loops=1) + -> Seq Scan on _hyper_5_19_chunk m_3 (actual rows=6048 loops=1) + -> Seq Scan on _hyper_5_20_chunk m_4 (actual rows=6048 loops=1) + -> Seq Scan on _hyper_5_21_chunk m_5 (actual rows=4611 loops=1) +(11 rows) + +:PREFIX SELECT * FROM generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval) AS g(time) INNER JOIN LATERAL (SELECT time FROM metrics_timestamptz m WHERE time=g.time ORDER BY time) m ON true; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=96 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m (actual rows=3 loops=32) + Chunks excluded during runtime: 4 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m_1 (actual rows=3 loops=5) + Index Cond: ("time" = g."time") + Heap Fetches: 15 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m_2 (actual rows=3 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 21 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m_3 (actual rows=3 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 21 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m_4 (actual rows=3 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 21 + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m_5 (actual rows=3 loops=6) + Index Cond: ("time" = g."time") + Heap Fetches: 18 +(19 rows) + +:PREFIX SELECT * FROM generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval) AS g(time) INNER JOIN LATERAL (SELECT time FROM metrics_timestamptz m WHERE time>g.time + '1 day' ORDER BY time LIMIT 1) m ON true; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=30 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m (actual rows=1 loops=32) + Order: m."time" + Chunks excluded during startup: 0 + Chunks excluded during runtime: 2 + -> Index Only Scan Backward using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m_1 (actual rows=1 loops=4) + Index Cond: ("time" > (g."time" + '@ 1 day'::interval)) + Heap Fetches: 4 + -> Index Only Scan Backward using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m_2 (actual rows=1 loops=7) + Index Cond: ("time" > (g."time" + '@ 1 day'::interval)) + Heap Fetches: 7 + -> Index Only Scan Backward using 
_hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m_3 (actual rows=1 loops=7) + Index Cond: ("time" > (g."time" + '@ 1 day'::interval)) + Heap Fetches: 7 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m_4 (actual rows=1 loops=7) + Index Cond: ("time" > (g."time" + '@ 1 day'::interval)) + Heap Fetches: 7 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m_5 (actual rows=1 loops=7) + Index Cond: ("time" > (g."time" + '@ 1 day'::interval)) + Heap Fetches: 5 +(22 rows) + +-- test runtime exclusion with subquery +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 WHERE m1.time=(SELECT max(time) FROM metrics_timestamptz); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz m1 (actual rows=3 loops=1) + Chunks excluded during runtime: 4 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=1 loops=1) + Order: metrics_timestamptz."time" DESC + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m1_1 (never executed) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m1_2 (never executed) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m1_3 (never executed) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m1_4 (never executed) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m1_5 (actual rows=3 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 3 +(38 rows) + +-- test runtime exclusion with correlated subquery +:PREFIX SELECT m1.time, (SELECT m2.time FROM metrics_timestamptz m2 WHERE m2.time < m1.time ORDER BY m2.time DESC LIMIT 1) FROM metrics_timestamptz m1 WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------ + Result (actual rows=7776 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 (actual rows=7776 loops=1) + Order: m1."time" + -> Index Only Scan Backward using 
_hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m1_1 (actual rows=4032 loops=1) + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 4032 + -> Index Only Scan Backward using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m1_2 (actual rows=3744 loops=1) + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 3744 + SubPlan 1 + -> Limit (actual rows=1 loops=7776) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m2 (actual rows=1 loops=7776) + Order: m2."time" DESC + Chunks excluded during runtime: 3 + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m2_1 (never executed) + Index Cond: ("time" < m1."time") + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m2_2 (never executed) + Index Cond: ("time" < m1."time") + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m2_3 (never executed) + Index Cond: ("time" < m1."time") + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m2_4 (actual rows=1 loops=3741) + Index Cond: ("time" < m1."time") + Heap Fetches: 3741 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m2_5 (actual rows=1 loops=4035) + Index Cond: ("time" < m1."time") + Heap Fetches: 4032 +(29 rows) + +-- test EXISTS +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 WHERE EXISTS(SELECT 1 FROM metrics_timestamptz m2 WHERE m1.time < m2.time) ORDER BY m1.time DESC limit 1000; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1000 loops=1) + -> Nested Loop Semi Join (actual rows=1000 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 (actual rows=1003 loops=1) + Order: m1."time" DESC + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m1_1 (actual rows=1003 loops=1) + Heap Fetches: 1003 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m1_2 (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m1_3 (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m1_4 (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m1_5 (never executed) + Heap Fetches: 0 + -> Append (actual rows=1 loops=1003) + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk m2_1 (actual rows=0 loops=1003) + Index Cond: ("time" > m1."time") + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m2_2 (actual rows=0 loops=1003) + Index Cond: ("time" > m1."time") + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m2_3 (actual rows=0 loops=1003) + Index Cond: ("time" > m1."time") + Heap Fetches: 0 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m2_4 (actual rows=0 loops=1003) + Index Cond: ("time" > m1."time") + Heap Fetches: 0 + -> Index Only Scan using 
_hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m2_5 (actual rows=1 loops=1003) + Index Cond: ("time" > m1."time") + Heap Fetches: 1000 +(30 rows) + +-- test constraint exclusion for subqueries with append +-- should include 2 chunks +:PREFIX SELECT time FROM (SELECT time FROM metrics_timestamptz WHERE time < '2000-01-10'::text::timestamptz ORDER BY time) m; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=7776 loops=1) + Order: metrics_timestamptz."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk (actual rows=4032 loops=1) + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + Heap Fetches: 4032 + -> Index Only Scan Backward using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk (actual rows=3744 loops=1) + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + Heap Fetches: 3744 +(9 rows) + +-- test constraint exclusion for subqueries with mergeappend +-- should include 2 chunks +:PREFIX SELECT device_id, time FROM (SELECT device_id, time FROM metrics_timestamptz WHERE time < '2000-01-10'::text::timestamptz ORDER BY device_id, time) m; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ConstraintAwareAppend) (actual rows=7776 loops=1) + Hypertable: metrics_timestamptz + Chunks excluded during startup: 3 + -> Merge Append (actual rows=7776 loops=1) + Sort Key: _hyper_5_17_chunk.device_id, _hyper_5_17_chunk."time" + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_device_id_time_idx on _hyper_5_17_chunk (actual rows=4032 loops=1) + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + Heap Fetches: 4032 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_device_id_time_idx on _hyper_5_18_chunk (actual rows=3744 loops=1) + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + Heap Fetches: 3744 +(11 rows) + +-- test LIMIT pushdown +-- no aggregates/window functions/SRF should pushdown limit +:PREFIX SELECT FROM metrics_timestamptz ORDER BY time LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz (actual rows=1 loops=1) + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_5_17_chunk_metrics_timestamptz_time_idx on _hyper_5_17_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk (never executed) + Heap Fetches: 0 +(13 rows) + +-- aggregates should prevent pushdown +:PREFIX SELECT count(*) FROM metrics_timestamptz LIMIT 1; + QUERY PLAN 
+---------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=5 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_17_chunk (actual rows=4032 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_18_chunk (actual rows=6048 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_19_chunk (actual rows=6048 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_20_chunk (actual rows=6048 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_21_chunk (actual rows=4611 loops=1) +(13 rows) + +:PREFIX SELECT count(*) FROM metrics_space LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_22_chunk (actual rows=5376 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_23_chunk (actual rows=5376 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_24_chunk (actual rows=2688 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_25_chunk (actual rows=8064 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_26_chunk (actual rows=8064 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_27_chunk (actual rows=4032 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_28_chunk (actual rows=1540 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_29_chunk (actual rows=1540 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_30_chunk (actual rows=770 loops=1) +(21 rows) + +-- HAVING should prevent pushdown +:PREFIX SELECT 1 FROM metrics_timestamptz HAVING count(*) > 1 LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Finalize Aggregate (actual rows=1 loops=1) + Filter: (count(*) > 1) + -> Append (actual rows=5 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_17_chunk (actual rows=4032 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_18_chunk (actual rows=6048 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_19_chunk (actual rows=6048 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_20_chunk (actual rows=6048 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_21_chunk (actual rows=4611 loops=1) +(14 rows) + +:PREFIX SELECT 1 FROM metrics_space HAVING count(*) > 1 LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Finalize Aggregate (actual rows=1 loops=1) + Filter: (count(*) > 1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_22_chunk (actual rows=5376 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_23_chunk (actual rows=5376 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_24_chunk (actual rows=2688 loops=1) + 
-> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_25_chunk (actual rows=8064 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_26_chunk (actual rows=8064 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_27_chunk (actual rows=4032 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_28_chunk (actual rows=1540 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_29_chunk (actual rows=1540 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_6_30_chunk (actual rows=770 loops=1) +(22 rows) + +-- DISTINCT should prevent pushdown +SET enable_hashagg TO false; +:PREFIX SELECT DISTINCT device_id FROM metrics_timestamptz ORDER BY device_id LIMIT 3; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Merge Append (actual rows=17859 loops=1) + Sort Key: _hyper_5_17_chunk.device_id + -> Index Only Scan using _hyper_5_17_chunk_metrics_timestamptz_device_id_time_idx on _hyper_5_17_chunk (actual rows=2689 loops=1) + Heap Fetches: 2689 + -> Index Only Scan using _hyper_5_18_chunk_metrics_timestamptz_device_id_time_idx on _hyper_5_18_chunk (actual rows=4033 loops=1) + Heap Fetches: 4033 + -> Index Only Scan using _hyper_5_19_chunk_metrics_timestamptz_device_id_time_idx on _hyper_5_19_chunk (actual rows=4033 loops=1) + Heap Fetches: 4033 + -> Index Only Scan using _hyper_5_20_chunk_metrics_timestamptz_device_id_time_idx on _hyper_5_20_chunk (actual rows=4033 loops=1) + Heap Fetches: 4033 + -> Index Only Scan using _hyper_5_21_chunk_metrics_timestamptz_device_id_time_idx on _hyper_5_21_chunk (actual rows=3075 loops=1) + Heap Fetches: 3075 +(14 rows) + +:PREFIX SELECT DISTINCT device_id FROM metrics_space ORDER BY device_id LIMIT 3; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Merge Append (actual rows=7491 loops=1) + Sort Key: _hyper_6_22_chunk.device_id + -> Index Only Scan using _hyper_6_22_chunk_metrics_space_device_id_time_idx on _hyper_6_22_chunk (actual rows=1345 loops=1) + Heap Fetches: 1345 + -> Index Only Scan using _hyper_6_23_chunk_metrics_space_device_id_time_idx on _hyper_6_23_chunk (actual rows=1345 loops=1) + Heap Fetches: 1345 + -> Index Only Scan using _hyper_6_24_chunk_metrics_space_device_id_time_idx on _hyper_6_24_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_6_25_chunk_metrics_space_device_id_time_idx on _hyper_6_25_chunk (actual rows=2017 loops=1) + Heap Fetches: 2017 + -> Index Only Scan using _hyper_6_26_chunk_metrics_space_device_id_time_idx on _hyper_6_26_chunk (actual rows=2017 loops=1) + Heap Fetches: 2017 + -> Index Only Scan using _hyper_6_27_chunk_metrics_space_device_id_time_idx on _hyper_6_27_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_6_28_chunk_metrics_space_device_id_time_idx on _hyper_6_28_chunk (actual rows=386 loops=1) + Heap Fetches: 386 + -> Index Only Scan using _hyper_6_29_chunk_metrics_space_device_id_time_idx on _hyper_6_29_chunk (actual rows=386 loops=1) + Heap Fetches: 386 + -> Index Only Scan using _hyper_6_30_chunk_metrics_space_device_id_time_idx on 
_hyper_6_30_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(22 rows) + +RESET enable_hashagg; +-- JOINs should prevent pushdown +-- when LIMIT gets pushed to a Sort node it will switch to top-N heapsort +-- if more tuples then LIMIT are requested this will trigger an error +-- to trigger this we need a Sort node that is below ChunkAppend +CREATE TABLE join_limit (time timestamptz, device_id int); +SELECT table_name FROM create_hypertable('join_limit','time',create_default_indexes:=false); +psql:include/append_query.sql:315: NOTICE: adding not-null constraint to column "time" + table_name +------------ + join_limit +(1 row) + +CREATE INDEX ON join_limit(time,device_id); +INSERT INTO join_limit +SELECT time, device_id +FROM generate_series('2000-01-01'::timestamptz,'2000-01-21','30m') g1(time), + generate_series(1,10,1) g2(device_id) +ORDER BY time, device_id; +-- get 2nd chunk oid +SELECT tableoid AS "CHUNK_OID" FROM join_limit WHERE time > '2000-01-07' ORDER BY time LIMIT 1 +\gset +--get index name for 2nd chunk +SELECT indexrelid::regclass AS "INDEX_NAME" FROM pg_index WHERE indrelid = :CHUNK_OID +\gset +DROP INDEX :INDEX_NAME; +:PREFIX SELECT * FROM metrics_timestamptz m1 INNER JOIN join_limit m2 ON m1.time = m2.time AND m1.device_id=m2.device_id WHERE m1.time > '2000-01-07' ORDER BY m1.time, m1.device_id LIMIT 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Merge Join (actual rows=3 loops=1) + Merge Cond: (m2."time" = m1."time") + Join Filter: (m2.device_id = m1.device_id) + Rows Removed by Join Filter: 4 + -> Custom Scan (ChunkAppend) on join_limit m2 (actual rows=3 loops=1) + Order: m2."time", m2.device_id + -> Sort (actual rows=3 loops=1) + Sort Key: m2_1."time", m2_1.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_8_35_chunk m2_1 (actual rows=2710 loops=1) + Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 650 + -> Index Only Scan using _hyper_8_36_chunk_join_limit_time_device_id_idx on _hyper_8_36_chunk m2_2 (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_8_37_chunk_join_limit_time_device_id_idx on _hyper_8_37_chunk m2_3 (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Materialize (actual rows=22 loops=1) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 (actual rows=19 loops=1) + Order: m1."time" + -> Index Scan Backward using _hyper_5_18_chunk_metrics_timestamptz_time_idx on _hyper_5_18_chunk m1_1 (actual rows=19 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_5_19_chunk_metrics_timestamptz_time_idx on _hyper_5_19_chunk m1_2 (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_5_20_chunk_metrics_timestamptz_time_idx on _hyper_5_20_chunk m1_3 (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_5_21_chunk_metrics_timestamptz_time_idx on _hyper_5_21_chunk m1_4 (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) +(30 rows) + +DROP TABLE join_limit; +-- test ChunkAppend projection #2661 +:PREFIX 
SELECT ts.timestamp, ht.timestamp +FROM ( + SELECT generate_series( + to_timestamp(FLOOR(EXTRACT (EPOCH FROM '2020-01-01T00:01:00Z'::timestamp) / 300) * 300) AT TIME ZONE 'UTC', + '2020-01-01T01:00:00Z', + '5 minutes'::interval + ) AS timestamp +) ts +LEFT JOIN i2661 ht ON + (FLOOR(EXTRACT (EPOCH FROM ht."timestamp") / 300) * 300 = EXTRACT (EPOCH FROM ts.timestamp)) + AND ht.timestamp > '2019-12-30T00:00:00Z'::timestamp; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------ + Merge Left Join (actual rows=33 loops=1) + Merge Cond: ((EXTRACT(epoch FROM ts."timestamp")) = ((floor((EXTRACT(epoch FROM ht."timestamp") / '300'::numeric)) * '300'::numeric))) + -> Sort (actual rows=13 loops=1) + Sort Key: (EXTRACT(epoch FROM ts."timestamp")) + Sort Method: quicksort + -> Subquery Scan on ts (actual rows=13 loops=1) + -> ProjectSet (actual rows=13 loops=1) + -> Result (actual rows=1 loops=1) + -> Sort (actual rows=514 loops=1) + Sort Key: ((floor((EXTRACT(epoch FROM ht."timestamp") / '300'::numeric)) * '300'::numeric)) + Sort Method: quicksort + -> Result (actual rows=7201 loops=1) + -> Custom Scan (ChunkAppend) on i2661 ht (actual rows=7201 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_7_31_chunk ht_1 (actual rows=1200 loops=1) + Filter: ("timestamp" > 'Mon Dec 30 00:00:00 2019'::timestamp without time zone) + -> Seq Scan on _hyper_7_32_chunk ht_2 (actual rows=5040 loops=1) + Filter: ("timestamp" > 'Mon Dec 30 00:00:00 2019'::timestamp without time zone) + -> Seq Scan on _hyper_7_33_chunk ht_3 (actual rows=961 loops=1) + Filter: ("timestamp" > 'Mon Dec 30 00:00:00 2019'::timestamp without time zone) +(20 rows) + +-- #3030 test chunkappend keeps pathkeys when subpath is append +-- on PG11 this will not use ChunkAppend but MergeAppend +SET enable_seqscan TO FALSE; +CREATE TABLE i3030(time timestamptz NOT NULL, a int, b int); +SELECT table_name FROM create_hypertable('i3030', 'time', create_default_indexes=>false); + table_name +------------ + i3030 +(1 row) + +CREATE INDEX ON i3030(a,time); +INSERT INTO i3030 (time,a) SELECT time, a FROM generate_series('2000-01-01'::timestamptz,'2000-01-01 3:00:00'::timestamptz,'1min'::interval) time, generate_series(1,30) a; +ANALYZE i3030; +:PREFIX SELECT * FROM i3030 where time BETWEEN '2000-01-01'::text::timestamptz AND '2000-01-03'::text::timestamptz ORDER BY a,time LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on i3030 (actual rows=1 loops=1) + Order: i3030.a, i3030."time" + Chunks excluded during startup: 0 + -> Index Scan using _hyper_9_38_chunk_i3030_a_time_idx on _hyper_9_38_chunk (actual rows=1 loops=1) + Index Cond: (("time" >= ('2000-01-01'::cstring)::timestamp with time zone) AND ("time" <= ('2000-01-03'::cstring)::timestamp with time zone)) +(6 rows) + +DROP TABLE i3030; +RESET enable_seqscan; +--parent runtime exclusion tests: +--optimization works with ANY (array) +:PREFIX +SELECT * +FROM append_test a +WHERE a.attr @> ANY((SELECT coalesce(array_agg(attr), array[]::jsonb[]) FROM join_test_plain WHERE temp > 100)::jsonb[]); + QUERY PLAN +-------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test a (actual rows=0 loops=1) + Hypertables excluded during runtime: 1 + InitPlan 1 (returns 
$0) + -> Aggregate (actual rows=1 loops=1) + -> Seq Scan on join_test_plain (actual rows=0 loops=1) + Filter: (temp > '100'::double precision) + Rows Removed by Filter: 3 + -> Seq Scan on _hyper_1_1_chunk a_1 (never executed) + Filter: (attr @> ANY ($0)) + -> Seq Scan on _hyper_1_2_chunk a_2 (never executed) + Filter: (attr @> ANY ($0)) + -> Seq Scan on _hyper_1_3_chunk a_3 (never executed) + Filter: (attr @> ANY ($0)) +(13 rows) + +--optimization does not work for ANY subquery (does not force an initplan) +:PREFIX +SELECT * +FROM append_test a +WHERE a.attr @> ANY((SELECT attr FROM join_test_plain WHERE temp > 100)); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Semi Join (actual rows=0 loops=1) + Join Filter: (a_1.attr @> join_test_plain.attr) + -> Append (actual rows=5 loops=1) + -> Seq Scan on _hyper_1_1_chunk a_1 (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_2_chunk a_2 (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk a_3 (actual rows=1 loops=1) + -> Materialize (actual rows=0 loops=5) + -> Seq Scan on join_test_plain (actual rows=0 loops=1) + Filter: (temp > '100'::double precision) + Rows Removed by Filter: 3 +(10 rows) + +--works on any strict operator without ANY +:PREFIX +SELECT * +FROM append_test a +WHERE a.attr @> (SELECT attr FROM join_test_plain WHERE temp > 100 limit 1); + QUERY PLAN +-------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test a (actual rows=0 loops=1) + Hypertables excluded during runtime: 1 + InitPlan 1 (returns $0) + -> Limit (actual rows=0 loops=1) + -> Seq Scan on join_test_plain (actual rows=0 loops=1) + Filter: (temp > '100'::double precision) + Rows Removed by Filter: 3 + -> Seq Scan on _hyper_1_1_chunk a_1 (never executed) + Filter: (attr @> $0) + -> Seq Scan on _hyper_1_2_chunk a_2 (never executed) + Filter: (attr @> $0) + -> Seq Scan on _hyper_1_3_chunk a_3 (never executed) + Filter: (attr @> $0) +(13 rows) + +--optimization works with function calls +CREATE OR REPLACE FUNCTION select_tag(_min_temp int) + RETURNS jsonb[] + LANGUAGE sql + STABLE PARALLEL SAFE +AS $function$ + SELECT coalesce(array_agg(attr), array[]::jsonb[]) + FROM join_test_plain + WHERE temp > _min_temp +$function$; +:PREFIX +SELECT * +FROM append_test a +WHERE a.attr @> ANY((SELECT select_tag(100))::jsonb[]); + QUERY PLAN +-------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test a (actual rows=0 loops=1) + Hypertables excluded during runtime: 1 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk a_1 (never executed) + Filter: (attr @> ANY ($0)) + -> Seq Scan on _hyper_1_2_chunk a_2 (never executed) + Filter: (attr @> ANY ($0)) + -> Seq Scan on _hyper_1_3_chunk a_3 (never executed) + Filter: (attr @> ANY ($0)) +(10 rows) + +--optimization does not work when result is null +:PREFIX +SELECT * +FROM append_test a +WHERE a.attr @> ANY((SELECT array_agg(attr) FROM join_test_plain WHERE temp > 100)::jsonb[]); + QUERY PLAN +-------------------------------------------------------------------- + Custom Scan (ChunkAppend) on append_test a (actual rows=0 loops=1) + Hypertables excluded during runtime: 0 + InitPlan 1 (returns $0) + -> Aggregate (actual rows=1 loops=1) + -> Seq Scan on join_test_plain (actual rows=0 loops=1) + Filter: (temp > '100'::double precision) + Rows Removed by Filter: 3 + -> Seq Scan on _hyper_1_1_chunk a_1 (actual rows=0 loops=1) + Filter: (attr @> ANY 
($0)) + Rows Removed by Filter: 2 + -> Seq Scan on _hyper_1_2_chunk a_2 (actual rows=0 loops=1) + Filter: (attr @> ANY ($0)) + Rows Removed by Filter: 2 + -> Seq Scan on _hyper_1_3_chunk a_3 (actual rows=0 loops=1) + Filter: (attr @> ANY ($0)) + Rows Removed by Filter: 1 +(16 rows) + +--generate the results into two different files +\set ECHO errors +--- Unoptimized results ++++ Optimized results +@@ -1,6 +1,6 @@ + setting | value + ----------------------------------+------- +- timescaledb.enable_optimizations | off ++ timescaledb.enable_optimizations | on + timescaledb.enable_chunk_append | on + (2 rows) + +--- Unoptimized results ++++ Optimized results +@@ -1,7 +1,7 @@ + setting | value + ----------------------------------+------- +- timescaledb.enable_optimizations | off +- timescaledb.enable_chunk_append | on ++ timescaledb.enable_optimizations | on ++ timescaledb.enable_chunk_append | off + (2 rows) + + time | temp | colorid | attr diff --git a/test/expected/cluster-16.out b/test/expected/cluster-16.out new file mode 100644 index 00000000000..630262f2a38 --- /dev/null +++ b/test/expected/cluster-16.out @@ -0,0 +1,168 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE cluster_test(time timestamptz, temp float, location int); +SELECT create_hypertable('cluster_test', 'time', chunk_time_interval => interval '1 day'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------- + (1,public,cluster_test,t) +(1 row) + +-- Show default indexes +SELECT * FROM test.show_indexes('cluster_test'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +-----------------------+---------+------+--------+---------+-----------+------------ + cluster_test_time_idx | {time} | | f | f | f | +(1 row) + +-- Create two chunks +INSERT INTO cluster_test VALUES ('2017-01-20T09:00:01', 23.4, 1), + ('2017-01-21T09:00:01', 21.3, 2); +-- Run cluster +CLUSTER VERBOSE cluster_test USING cluster_test_time_idx; +INFO: clustering "_timescaledb_internal._hyper_1_1_chunk" using index scan on "_hyper_1_1_chunk_cluster_test_time_idx" +INFO: "_timescaledb_internal._hyper_1_1_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "_timescaledb_internal._hyper_1_2_chunk" using index scan on "_hyper_1_2_chunk_cluster_test_time_idx" +INFO: "_timescaledb_internal._hyper_1_2_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +-- Create a third chunk +INSERT INTO cluster_test VALUES ('2017-01-22T09:00:01', 19.5, 3); +-- Show clustered indexes +SELECT indexrelid::regclass, indisclustered +FROM pg_index +WHERE indisclustered = true ORDER BY 1; + indexrelid | indisclustered +--------------------------------------------------------------+---------------- + cluster_test_time_idx | t + _timescaledb_internal._hyper_1_1_chunk_cluster_test_time_idx | t + _timescaledb_internal._hyper_1_2_chunk_cluster_test_time_idx | t +(3 rows) + +-- Reorder just our table +CLUSTER VERBOSE cluster_test; +INFO: clustering "_timescaledb_internal._hyper_1_1_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_1_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "_timescaledb_internal._hyper_1_2_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_2_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering 
"_timescaledb_internal._hyper_1_3_chunk" using index scan on "_hyper_1_3_chunk_cluster_test_time_idx" +INFO: "_timescaledb_internal._hyper_1_3_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +-- Show clustered indexes, including new chunk +SELECT indexrelid::regclass, indisclustered +FROM pg_index +WHERE indisclustered = true ORDER BY 1; + indexrelid | indisclustered +--------------------------------------------------------------+---------------- + cluster_test_time_idx | t + _timescaledb_internal._hyper_1_1_chunk_cluster_test_time_idx | t + _timescaledb_internal._hyper_1_2_chunk_cluster_test_time_idx | t + _timescaledb_internal._hyper_1_3_chunk_cluster_test_time_idx | t +(4 rows) + +-- Reorder all tables (although will only be our test table) +CLUSTER VERBOSE; +INFO: clustering "_timescaledb_internal._hyper_1_1_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_1_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "_timescaledb_internal._hyper_1_2_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_2_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "public.cluster_test" using sequential scan and sort +INFO: "public.cluster_test": found 0 removable, 0 nonremovable row versions in 0 pages +INFO: clustering "_timescaledb_internal._hyper_1_3_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_3_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +-- Change the clustered index +CREATE INDEX ON cluster_test (time, location); +CLUSTER VERBOSE cluster_test using cluster_test_time_location_idx; +INFO: clustering "_timescaledb_internal._hyper_1_1_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_1_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "_timescaledb_internal._hyper_1_2_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_2_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "_timescaledb_internal._hyper_1_3_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_3_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +-- Show updated clustered indexes +SELECT indexrelid::regclass, indisclustered +FROM pg_index +WHERE indisclustered = true ORDER BY 1; + indexrelid | indisclustered +-----------------------------------------------------------------------+---------------- + cluster_test_time_location_idx | t + _timescaledb_internal._hyper_1_1_chunk_cluster_test_time_location_idx | t + _timescaledb_internal._hyper_1_2_chunk_cluster_test_time_location_idx | t + _timescaledb_internal._hyper_1_3_chunk_cluster_test_time_location_idx | t +(4 rows) + +--check the setting of cluster indexes on hypertables and chunks +ALTER TABLE cluster_test CLUSTER ON cluster_test_time_idx; +SELECT indexrelid::regclass, indisclustered +FROM pg_index +WHERE indisclustered = true +ORDER BY 1,2; + indexrelid | indisclustered +--------------------------------------------------------------+---------------- + cluster_test_time_idx | t + _timescaledb_internal._hyper_1_1_chunk_cluster_test_time_idx | t + _timescaledb_internal._hyper_1_2_chunk_cluster_test_time_idx | t + _timescaledb_internal._hyper_1_3_chunk_cluster_test_time_idx | t +(4 rows) + +CLUSTER VERBOSE cluster_test; +INFO: clustering "_timescaledb_internal._hyper_1_1_chunk" using sequential scan and sort +INFO: 
"_timescaledb_internal._hyper_1_1_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "_timescaledb_internal._hyper_1_2_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_2_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +INFO: clustering "_timescaledb_internal._hyper_1_3_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_3_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +ALTER TABLE cluster_test SET WITHOUT CLUSTER; +SELECT indexrelid::regclass, indisclustered +FROM pg_index +WHERE indisclustered = true +ORDER BY 1,2; + indexrelid | indisclustered +------------+---------------- +(0 rows) + +\set ON_ERROR_STOP 0 +CLUSTER VERBOSE cluster_test; +ERROR: there is no previously clustered index for table "cluster_test" +\set ON_ERROR_STOP 1 +ALTER TABLE _timescaledb_internal._hyper_1_1_chunk CLUSTER ON _hyper_1_1_chunk_cluster_test_time_idx; +SELECT indexrelid::regclass, indisclustered +FROM pg_index +WHERE indisclustered = true +ORDER BY 1,2; + indexrelid | indisclustered +--------------------------------------------------------------+---------------- + _timescaledb_internal._hyper_1_1_chunk_cluster_test_time_idx | t +(1 row) + +CLUSTER VERBOSE _timescaledb_internal._hyper_1_1_chunk; +INFO: clustering "_timescaledb_internal._hyper_1_1_chunk" using sequential scan and sort +INFO: "_timescaledb_internal._hyper_1_1_chunk": found 0 removable, 1 nonremovable row versions in 1 pages +ALTER TABLE _timescaledb_internal._hyper_1_1_chunk SET WITHOUT CLUSTER; +SELECT indexrelid::regclass, indisclustered +FROM pg_index +WHERE indisclustered = true +ORDER BY 1,2; + indexrelid | indisclustered +------------+---------------- +(0 rows) + +\set ON_ERROR_STOP 0 +CLUSTER VERBOSE _timescaledb_internal._hyper_1_1_chunk; +ERROR: there is no previously clustered index for table "_hyper_1_1_chunk" +\set ON_ERROR_STOP 1 +-- test alter column type on hypertable with clustering +CREATE TABLE cluster_alter(time timestamp, id text, val int); +CREATE INDEX idstuff ON cluster_alter USING btree (id ASC NULLS LAST, time); +SELECT table_name FROM create_hypertable('cluster_alter', 'time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + table_name +--------------- + cluster_alter +(1 row) + +INSERT INTO cluster_alter VALUES('2020-01-01', '123', 1); +CLUSTER cluster_alter using idstuff; +--attempt the alter table +ALTER TABLE cluster_alter ALTER COLUMN id TYPE int USING id::int; +CLUSTER cluster_alter; +CLUSTER cluster_alter using idstuff; diff --git a/test/expected/cursor-16.out b/test/expected/cursor-16.out new file mode 100644 index 00000000000..fb991bcb41f --- /dev/null +++ b/test/expected/cursor-16.out @@ -0,0 +1,52 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE TABLE cursor_test(time timestamptz, device_id int, temp float); +SELECT create_hypertable('cursor_test','time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------- + (1,public,cursor_test,t) +(1 row) + +INSERT INTO cursor_test SELECT '2000-01-01',1,0.5; +INSERT INTO cursor_test SELECT '2001-01-01',1,0.5; +INSERT INTO cursor_test SELECT '2002-01-01',1,0.5; +\set ON_ERROR_STOP 0 +BEGIN; +DECLARE c1 SCROLL CURSOR FOR SELECT * FROM cursor_test; +FETCH NEXT FROM c1; + time | device_id | temp +------------------------------+-----------+------ + Sat Jan 01 00:00:00 2000 PST | 1 | 0.5 +(1 row) + +-- this will produce an error on PG < 14 because PostgreSQL checks +-- for the existence of a scan node with the relation id for every relation +-- used in the update plan in the plan of the cursor. +UPDATE cursor_test SET temp = 0.7 WHERE CURRENT OF c1; +COMMIT; +-- test cursor with no chunks left after runtime exclusion +BEGIN; +DECLARE c1 SCROLL CURSOR FOR SELECT * FROM cursor_test WHERE time > now(); +UPDATE cursor_test SET temp = 0.7 WHERE CURRENT OF c1; +ERROR: cursor "c1" is not a simply updatable scan of table "_hyper_1_1_chunk" +COMMIT; +-- test cursor with no chunks left after planning exclusion +BEGIN; +DECLARE c1 SCROLL CURSOR FOR SELECT * FROM cursor_test WHERE time > '2010-01-01'; +UPDATE cursor_test SET temp = 0.7 WHERE CURRENT OF c1; +ERROR: cursor "c1" is not a simply updatable scan of table "_hyper_1_1_chunk" +COMMIT; +\set ON_ERROR_STOP 1 +SET timescaledb.enable_constraint_exclusion TO off; +BEGIN; +DECLARE c1 SCROLL CURSOR FOR SELECT * FROM cursor_test; +FETCH NEXT FROM c1; + time | device_id | temp +------------------------------+-----------+------ + Sat Jan 01 00:00:00 2000 PST | 1 | 0.7 +(1 row) + +UPDATE cursor_test SET temp = 0.7 WHERE CURRENT OF c1; +COMMIT; diff --git a/test/expected/ddl-16.out b/test/expected/ddl-16.out new file mode 100644 index 00000000000..a61e572e08f --- /dev/null +++ b/test/expected/ddl-16.out @@ -0,0 +1,594 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA IF NOT EXISTS "customSchema" AUTHORIZATION :ROLE_DEFAULT_PERM_USER; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +\ir include/ddl_ops_1.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE TABLE PUBLIC."Hypertable_1" ( + time BIGINT NOT NULL, + "Device_id" TEXT NOT NULL, + temp_c int NOT NULL DEFAULT -1, + humidity numeric NULL DEFAULT 0, + sensor_1 NUMERIC NULL DEFAULT 1, + sensor_2 NUMERIC NOT NULL DEFAULT 1, + sensor_3 NUMERIC NOT NULL DEFAULT 1, + sensor_4 NUMERIC NOT NULL DEFAULT 1 +); +CREATE INDEX ON PUBLIC."Hypertable_1" (time, "Device_id"); +CREATE TABLE "customSchema"."Hypertable_1" ( + time BIGINT NOT NULL, + "Device_id" TEXT NOT NULL, + temp_c int NOT NULL DEFAULT -1, + humidity numeric NULL DEFAULT 0, + sensor_1 NUMERIC NULL DEFAULT 1, + sensor_2 NUMERIC NOT NULL DEFAULT 1, + sensor_3 NUMERIC NOT NULL DEFAULT 1, + sensor_4 NUMERIC NOT NULL DEFAULT 1 +); +CREATE INDEX ON "customSchema"."Hypertable_1" (time, "Device_id"); +SELECT * FROM create_hypertable('"public"."Hypertable_1"', 'time', 'Device_id', 1, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 1 | public | Hypertable_1 | t +(1 row) + +SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+--------------+--------------+--------- + 2 | customSchema | Hypertable_1 | t +(1 row) + +SELECT * FROM _timescaledb_catalog.hypertable; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 +(2 rows) + +CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c"); +CREATE INDEX "ind_humidity" ON PUBLIC."Hypertable_1" (time, "humidity"); +CREATE INDEX "ind_sensor_1" ON PUBLIC."Hypertable_1" (time, "sensor_1"); +INSERT INTO PUBLIC."Hypertable_1"(time, "Device_id", temp_c, humidity, sensor_1, sensor_2, sensor_3, sensor_4) +VALUES(1257894000000000000, 'dev1', 30, 70, 1, 2, 3, 100); +CREATE UNIQUE INDEX "Unique1" ON PUBLIC."Hypertable_1" (time, "Device_id"); +CREATE UNIQUE INDEX "Unique1" ON "customSchema"."Hypertable_1" (time); +INSERT INTO "customSchema"."Hypertable_1"(time, "Device_id", temp_c, humidity, sensor_1, sensor_2, sensor_3, sensor_4) +VALUES(1257894000000000000, 'dev1', 30, 70, 1, 2, 3, 100); +INSERT INTO "customSchema"."Hypertable_1"(time, "Device_id", temp_c, humidity, sensor_1, sensor_2, sensor_3, sensor_4) +VALUES(1257894000000000001, 'dev1', 30, 70, 1, 2, 3, 100); +SELECT * FROM _timescaledb_catalog.chunk_index ORDER BY hypertable_id, hypertable_index_name, chunk_id; + chunk_id | index_name | hypertable_id | hypertable_index_name +----------+--------------------------------------------------+---------------+--------------------------------- + 1 | _hyper_1_1_chunk_Hypertable_1_Device_id_time_idx | 1 | Hypertable_1_Device_id_time_idx + 1 | _hyper_1_1_chunk_Hypertable_1_time_Device_id_idx | 1 | 
Hypertable_1_time_Device_id_idx + 1 | _hyper_1_1_chunk_Hypertable_1_time_idx | 1 | Hypertable_1_time_idx + 1 | _hyper_1_1_chunk_Hypertable_1_time_temp_c_idx | 1 | Hypertable_1_time_temp_c_idx + 1 | _hyper_1_1_chunk_Unique1 | 1 | Unique1 + 1 | _hyper_1_1_chunk_ind_humidity | 1 | ind_humidity + 1 | _hyper_1_1_chunk_ind_sensor_1 | 1 | ind_sensor_1 + 2 | _hyper_2_2_chunk_Hypertable_1_time_Device_id_idx | 2 | Hypertable_1_time_Device_id_idx + 2 | _hyper_2_2_chunk_Hypertable_1_time_idx | 2 | Hypertable_1_time_idx + 2 | _hyper_2_2_chunk_Unique1 | 2 | Unique1 +(10 rows) + +--expect error cases +\set ON_ERROR_STOP 0 +INSERT INTO "customSchema"."Hypertable_1"(time, "Device_id", temp_c, humidity, sensor_1, sensor_2, sensor_3, sensor_4) +VALUES(1257894000000000000, 'dev1', 31, 71, 72, 4, 1, 102); +psql:include/ddl_ops_1.sql:56: ERROR: duplicate key value violates unique constraint "_hyper_2_2_chunk_Unique1" +CREATE UNIQUE INDEX "Unique2" ON PUBLIC."Hypertable_1" ("Device_id"); +psql:include/ddl_ops_1.sql:57: ERROR: cannot create a unique index without the column "time" (used in partitioning) +CREATE UNIQUE INDEX "Unique2" ON PUBLIC."Hypertable_1" (time); +psql:include/ddl_ops_1.sql:58: ERROR: cannot create a unique index without the column "Device_id" (used in partitioning) +CREATE UNIQUE INDEX "Unique2" ON PUBLIC."Hypertable_1" (sensor_1); +psql:include/ddl_ops_1.sql:59: ERROR: cannot create a unique index without the column "time" (used in partitioning) +UPDATE ONLY PUBLIC."Hypertable_1" SET time = 0 WHERE TRUE; +DELETE FROM ONLY PUBLIC."Hypertable_1" WHERE "Device_id" = 'dev1'; +\set ON_ERROR_STOP 1 +CREATE TABLE my_ht (time BIGINT, val integer); +SELECT * FROM create_hypertable('my_ht', 'time', chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); +psql:include/ddl_ops_1.sql:66: NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 3 | public | my_ht | t +(1 row) + +ALTER TABLE my_ht ADD COLUMN val2 integer; +SELECT * FROM test.show_columns('my_ht'); + Column | Type | NotNull +--------+---------+--------- + time | bigint | t + val | integer | f + val2 | integer | f +(3 rows) + +-- Should error when adding again +\set ON_ERROR_STOP 0 +ALTER TABLE my_ht ADD COLUMN val2 integer; +psql:include/ddl_ops_1.sql:72: ERROR: column "val2" of relation "my_ht" already exists +\set ON_ERROR_STOP 1 +-- Should create +ALTER TABLE my_ht ADD COLUMN IF NOT EXISTS val3 integer; +SELECT * FROM test.show_columns('my_ht'); + Column | Type | NotNull +--------+---------+--------- + time | bigint | t + val | integer | f + val2 | integer | f + val3 | integer | f +(4 rows) + +-- Should skip and not error +ALTER TABLE my_ht ADD COLUMN IF NOT EXISTS val3 integer; +psql:include/ddl_ops_1.sql:80: NOTICE: column "val3" of relation "my_ht" already exists, skipping +SELECT * FROM test.show_columns('my_ht'); + Column | Type | NotNull +--------+---------+--------- + time | bigint | t + val | integer | f + val2 | integer | f + val3 | integer | f +(4 rows) + +-- Should drop +ALTER TABLE my_ht DROP COLUMN IF EXISTS val3; +SELECT * FROM test.show_columns('my_ht'); + Column | Type | NotNull +--------+---------+--------- + time | bigint | t + val | integer | f + val2 | integer | f +(3 rows) + +-- Should skip and not error +ALTER TABLE my_ht DROP COLUMN IF EXISTS val3; +psql:include/ddl_ops_1.sql:88: NOTICE: column "val3" of relation "my_ht" does not exist, skipping +SELECT * FROM test.show_columns('my_ht'); + 
Column | Type | NotNull +--------+---------+--------- + time | bigint | t + val | integer | f + val2 | integer | f +(3 rows) + +--Test default index creation on create_hypertable(). +--Make sure that we do not duplicate indexes that already exists +-- +--No existing indexes: both time and space-time indexes created +BEGIN; +CREATE TABLE PUBLIC."Hypertable_1_with_default_index_enabled" ( + "Time" BIGINT NOT NULL, + "Device_id" TEXT NOT NULL, + sensor_1 NUMERIC NULL DEFAULT 1 +); +SELECT * FROM create_hypertable('"public"."Hypertable_1_with_default_index_enabled"', 'Time', 'Device_id', 1, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------------------------------+--------- + 4 | public | Hypertable_1_with_default_index_enabled | t +(1 row) + +SELECT * FROM test.show_indexes('"Hypertable_1_with_default_index_enabled"'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +--------------------------------------------------------------+------------------+------+--------+---------+-----------+------------ + "Hypertable_1_with_default_index_enabled_Device_id_Time_idx" | {Device_id,Time} | | f | f | f | + "Hypertable_1_with_default_index_enabled_Time_idx" | {Time} | | f | f | f | +(2 rows) + +ROLLBACK; +--Space index exists: only time index created +BEGIN; +CREATE TABLE PUBLIC."Hypertable_1_with_default_index_enabled" ( + "Time" BIGINT NOT NULL, + "Device_id" TEXT NOT NULL, + sensor_1 NUMERIC NULL DEFAULT 1 +); +CREATE INDEX ON PUBLIC."Hypertable_1_with_default_index_enabled" ("Device_id", "Time" DESC); +SELECT * FROM create_hypertable('"public"."Hypertable_1_with_default_index_enabled"', 'Time', 'Device_id', 1, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------------------------------+--------- + 5 | public | Hypertable_1_with_default_index_enabled | t +(1 row) + +SELECT * FROM test.show_indexes('"Hypertable_1_with_default_index_enabled"'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +--------------------------------------------------------------+------------------+------+--------+---------+-----------+------------ + "Hypertable_1_with_default_index_enabled_Device_id_Time_idx" | {Device_id,Time} | | f | f | f | + "Hypertable_1_with_default_index_enabled_Time_idx" | {Time} | | f | f | f | +(2 rows) + +ROLLBACK; +--Time index exists, only partition index created +BEGIN; +CREATE TABLE PUBLIC."Hypertable_1_with_default_index_enabled" ( + "Time" BIGINT NOT NULL, + "Device_id" TEXT NOT NULL, + sensor_1 NUMERIC NULL DEFAULT 1 +); +CREATE INDEX ON PUBLIC."Hypertable_1_with_default_index_enabled" ("Time" DESC); +SELECT * FROM create_hypertable('"public"."Hypertable_1_with_default_index_enabled"', 'Time', 'Device_id', 1, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------------------------------+--------- + 6 | public | Hypertable_1_with_default_index_enabled | t +(1 row) + +SELECT * FROM test.show_indexes('"Hypertable_1_with_default_index_enabled"'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +--------------------------------------------------------------+------------------+------+--------+---------+-----------+------------ + 
"Hypertable_1_with_default_index_enabled_Device_id_Time_idx" | {Device_id,Time} | | f | f | f | + "Hypertable_1_with_default_index_enabled_Time_idx" | {Time} | | f | f | f | +(2 rows) + +ROLLBACK; +--No space partitioning, only time index created +BEGIN; +CREATE TABLE PUBLIC."Hypertable_1_with_default_index_enabled" ( + "Time" BIGINT NOT NULL, + "Device_id" TEXT NOT NULL, + sensor_1 NUMERIC NULL DEFAULT 1 +); +SELECT * FROM create_hypertable('"public"."Hypertable_1_with_default_index_enabled"', 'Time', chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------------------------------+--------- + 7 | public | Hypertable_1_with_default_index_enabled | t +(1 row) + +SELECT * FROM test.show_indexes('"Hypertable_1_with_default_index_enabled"'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +----------------------------------------------------+---------+------+--------+---------+-----------+------------ + "Hypertable_1_with_default_index_enabled_Time_idx" | {Time} | | f | f | f | +(1 row) + +ROLLBACK; +--Disable index creation: no default indexes created +BEGIN; +CREATE TABLE PUBLIC."Hypertable_1_with_default_index_enabled" ( + "Time" BIGINT NOT NULL, + "Device_id" TEXT NOT NULL, + sensor_1 NUMERIC NULL DEFAULT 1 +); +SELECT * FROM create_hypertable('"public"."Hypertable_1_with_default_index_enabled"', 'Time', 'Device_id', 1, create_default_indexes=>FALSE, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------------------------------+--------- + 8 | public | Hypertable_1_with_default_index_enabled | t +(1 row) + +SELECT * FROM test.show_indexes('"Hypertable_1_with_default_index_enabled"'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +-------+---------+------+--------+---------+-----------+------------ +(0 rows) + +ROLLBACK; +SELECT * FROM PUBLIC."Hypertable_1"; + time | Device_id | temp_c | humidity | sensor_1 | sensor_2 | sensor_3 | sensor_4 +---------------------+-----------+--------+----------+----------+----------+----------+---------- + 1257894000000000000 | dev1 | 30 | 70 | 1 | 2 | 3 | 100 +(1 row) + +SELECT * FROM ONLY PUBLIC."Hypertable_1"; + time | Device_id | temp_c | humidity | sensor_1 | sensor_2 | sensor_3 | sensor_4 +------+-----------+--------+----------+----------+----------+----------+---------- +(0 rows) + +EXPLAIN (costs off) SELECT * FROM ONLY PUBLIC."Hypertable_1"; + QUERY PLAN +---------------------------- + Seq Scan on "Hypertable_1" +(1 row) + +SELECT * FROM test.show_columns('PUBLIC."Hypertable_1"'); + Column | Type | NotNull +-----------+---------+--------- + time | bigint | t + Device_id | text | t + temp_c | integer | t + humidity | numeric | f + sensor_1 | numeric | f + sensor_2 | numeric | t + sensor_3 | numeric | t + sensor_4 | numeric | t +(8 rows) + +SELECT * FROM test.show_columns('_timescaledb_internal._hyper_1_1_chunk'); + Column | Type | NotNull +-----------+---------+--------- + time | bigint | t + Device_id | text | t + temp_c | integer | t + humidity | numeric | f + sensor_1 | numeric | f + sensor_2 | numeric | t + sensor_3 | numeric | t + sensor_4 | numeric | t +(8 rows) + +\ir include/ddl_ops_2.sql +-- This file and its contents are licensed under the Apache License 2.0. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +ALTER TABLE PUBLIC."Hypertable_1" ADD COLUMN temp_f INTEGER NOT NULL DEFAULT 31; +ALTER TABLE PUBLIC."Hypertable_1" DROP COLUMN temp_c; +ALTER TABLE PUBLIC."Hypertable_1" DROP COLUMN sensor_4; +ALTER TABLE PUBLIC."Hypertable_1" ALTER COLUMN humidity SET DEFAULT 100; +ALTER TABLE PUBLIC."Hypertable_1" ALTER COLUMN sensor_1 DROP DEFAULT; +ALTER TABLE PUBLIC."Hypertable_1" ALTER COLUMN sensor_2 SET DEFAULT NULL; +ALTER TABLE PUBLIC."Hypertable_1" ALTER COLUMN sensor_1 SET NOT NULL; +ALTER TABLE PUBLIC."Hypertable_1" ALTER COLUMN sensor_2 DROP NOT NULL; +ALTER TABLE PUBLIC."Hypertable_1" RENAME COLUMN sensor_2 TO sensor_2_renamed; +ALTER TABLE PUBLIC."Hypertable_1" RENAME COLUMN sensor_3 TO sensor_3_renamed; +DROP INDEX "ind_sensor_1"; +CREATE OR REPLACE FUNCTION empty_trigger_func() + RETURNS TRIGGER LANGUAGE PLPGSQL AS +$BODY$ +BEGIN +END +$BODY$; +CREATE TRIGGER test_trigger BEFORE UPDATE OR DELETE ON PUBLIC."Hypertable_1" +FOR EACH STATEMENT EXECUTE FUNCTION empty_trigger_func(); +ALTER TABLE PUBLIC."Hypertable_1" ALTER COLUMN sensor_2_renamed SET DATA TYPE int; +ALTER INDEX "ind_humidity" RENAME TO "ind_humdity2"; +-- Change should be reflected here +SELECT * FROM _timescaledb_catalog.chunk_index; + chunk_id | index_name | hypertable_id | hypertable_index_name +----------+--------------------------------------------------+---------------+--------------------------------- + 1 | _hyper_1_1_chunk_Hypertable_1_time_Device_id_idx | 1 | Hypertable_1_time_Device_id_idx + 1 | _hyper_1_1_chunk_Hypertable_1_time_idx | 1 | Hypertable_1_time_idx + 1 | _hyper_1_1_chunk_Hypertable_1_Device_id_time_idx | 1 | Hypertable_1_Device_id_time_idx + 1 | _hyper_1_1_chunk_Unique1 | 1 | Unique1 + 2 | _hyper_2_2_chunk_Hypertable_1_time_Device_id_idx | 2 | Hypertable_1_time_Device_id_idx + 2 | _hyper_2_2_chunk_Hypertable_1_time_idx | 2 | Hypertable_1_time_idx + 2 | _hyper_2_2_chunk_Unique1 | 2 | Unique1 + 1 | _hyper_1_1_chunk_ind_humdity2 | 1 | ind_humdity2 +(8 rows) + +--create column with same name as previously renamed one +ALTER TABLE PUBLIC."Hypertable_1" ADD COLUMN sensor_3 BIGINT NOT NULL DEFAULT 131; +--create column with same name as previously dropped one +ALTER TABLE PUBLIC."Hypertable_1" ADD COLUMN sensor_4 BIGINT NOT NULL DEFAULT 131; +SELECT * FROM test.show_columns('PUBLIC."Hypertable_1"'); + Column | Type | NotNull +------------------+---------+--------- + time | bigint | t + Device_id | text | t + humidity | numeric | f + sensor_1 | numeric | t + sensor_2_renamed | integer | f + sensor_3_renamed | numeric | t + temp_f | integer | t + sensor_3 | bigint | t + sensor_4 | bigint | t +(9 rows) + +SELECT * FROM test.show_columns('_timescaledb_internal._hyper_1_1_chunk'); + Column | Type | NotNull +------------------+---------+--------- + time | bigint | t + Device_id | text | t + humidity | numeric | f + sensor_1 | numeric | t + sensor_2_renamed | integer | f + sensor_3_renamed | numeric | t + temp_f | integer | t + sensor_3 | bigint | t + sensor_4 | bigint | t +(9 rows) + +SELECT * FROM PUBLIC."Hypertable_1"; + time | Device_id | humidity | sensor_1 | sensor_2_renamed | sensor_3_renamed | temp_f | sensor_3 | sensor_4 +---------------------+-----------+----------+----------+------------------+------------------+--------+----------+---------- + 1257894000000000000 | dev1 | 70 | 1 | 2 | 3 | 31 | 131 | 131 +(1 row) + +-- alter column tests +CREATE TABLE alter_test(time timestamptz, temp float, 
color varchar(10)); +-- create hypertable with two chunks +SELECT create_hypertable('alter_test', 'time', 'color', 2, chunk_time_interval => 2628000000000); +WARNING: column type "character varying" used for "color" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (9,public,alter_test,t) +(1 row) + +INSERT INTO alter_test VALUES ('2017-01-20T09:00:01', 17.5, 'blue'), + ('2017-01-21T09:00:01', 19.1, 'yellow'), + ('2017-04-20T09:00:01', 89.5, 'green'), + ('2017-04-21T09:00:01', 17.1, 'black'); +SELECT * FROM test.show_columns('alter_test'); + Column | Type | NotNull +--------+--------------------------+--------- + time | timestamp with time zone | t + temp | double precision | f + color | character varying | f +(3 rows) + +SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); + Relation | Kind | Column | Column type | NotNull +----------------------------------------+------+--------+--------------------------+--------- + _timescaledb_internal._hyper_9_3_chunk | r | time | timestamp with time zone | t + _timescaledb_internal._hyper_9_3_chunk | r | temp | double precision | f + _timescaledb_internal._hyper_9_3_chunk | r | color | character varying | f + _timescaledb_internal._hyper_9_4_chunk | r | time | timestamp with time zone | t + _timescaledb_internal._hyper_9_4_chunk | r | temp | double precision | f + _timescaledb_internal._hyper_9_4_chunk | r | color | character varying | f + _timescaledb_internal._hyper_9_5_chunk | r | time | timestamp with time zone | t + _timescaledb_internal._hyper_9_5_chunk | r | temp | double precision | f + _timescaledb_internal._hyper_9_5_chunk | r | color | character varying | f +(9 rows) + +-- show the column name and type of the partitioning dimension in the +-- metadata table +SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+-------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ + 15 | 9 | color | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | + 14 | 9 | time | timestamp with time zone | t | | | | 2628000000000 | | | +(2 rows) + +EXPLAIN (costs off) +SELECT * FROM alter_test WHERE time > '2017-05-20T10:00:01'; + QUERY PLAN +----------------------------------------------------------------------------------------- + Append + -> Index Scan using _hyper_9_4_chunk_alter_test_time_idx on _hyper_9_4_chunk + Index Cond: ("time" > 'Sat May 20 10:00:01 2017 PDT'::timestamp with time zone) + -> Index Scan using _hyper_9_5_chunk_alter_test_time_idx on _hyper_9_5_chunk + Index Cond: ("time" > 'Sat May 20 10:00:01 2017 PDT'::timestamp with time zone) +(5 rows) + +-- rename column and change its type +ALTER TABLE alter_test RENAME COLUMN time TO time_us; +--converting timestamptz->timestamp should happen under UTC +SET timezone = 'UTC'; +ALTER TABLE alter_test ALTER COLUMN time_us TYPE timestamp; +RESET timezone; +ALTER TABLE alter_test RENAME COLUMN color TO colorname; +\set ON_ERROR_STOP 0 +-- Changing types on hash-partitioned columns is not safe for some +-- types and is therefore blocked. 
+ALTER TABLE alter_test ALTER COLUMN colorname TYPE text; +ERROR: cannot change the type of a hash-partitioned column +\set ON_ERROR_STOP 1 +SELECT * FROM test.show_columns('alter_test'); + Column | Type | NotNull +-----------+-----------------------------+--------- + time_us | timestamp without time zone | t + temp | double precision | f + colorname | character varying | f +(3 rows) + +SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_9_%chunk'); + Relation | Kind | Column | Column type | NotNull +----------------------------------------+------+-----------+-----------------------------+--------- + _timescaledb_internal._hyper_9_3_chunk | r | time_us | timestamp without time zone | t + _timescaledb_internal._hyper_9_3_chunk | r | temp | double precision | f + _timescaledb_internal._hyper_9_3_chunk | r | colorname | character varying | f + _timescaledb_internal._hyper_9_4_chunk | r | time_us | timestamp without time zone | t + _timescaledb_internal._hyper_9_4_chunk | r | temp | double precision | f + _timescaledb_internal._hyper_9_4_chunk | r | colorname | character varying | f + _timescaledb_internal._hyper_9_5_chunk | r | time_us | timestamp without time zone | t + _timescaledb_internal._hyper_9_5_chunk | r | temp | double precision | f + _timescaledb_internal._hyper_9_5_chunk | r | colorname | character varying | f +(9 rows) + +-- show that the metadata has been updated +SELECT * FROM _timescaledb_catalog.dimension WHERE hypertable_id = 9; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+-------------+-----------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ + 15 | 9 | colorname | character varying | f | 2 | _timescaledb_functions | get_partition_hash | | | | + 14 | 9 | time_us | timestamp without time zone | t | | | | 2628000000000 | | | +(2 rows) + +-- constraint exclusion should still work with updated column +EXPLAIN (costs off) +SELECT * FROM alter_test WHERE time_us > '2017-05-20T10:00:01'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on _hyper_9_4_chunk + Filter: (time_us > 'Sat May 20 10:00:01 2017'::timestamp without time zone) + -> Seq Scan on _hyper_9_5_chunk + Filter: (time_us > 'Sat May 20 10:00:01 2017'::timestamp without time zone) +(5 rows) + +\set ON_ERROR_STOP 0 +-- verify that we cannot change the column type to something incompatible +ALTER TABLE alter_test ALTER COLUMN colorname TYPE varchar(3); +ERROR: cannot change the type of a hash-partitioned column +-- conversion that messes up partitioning fails +ALTER TABLE alter_test ALTER COLUMN time_us TYPE timestamptz USING time_us::timestamptz+INTERVAL '1 year'; +ERROR: check constraint "constraint_4" of relation "_hyper_9_3_chunk" is violated by some row +-- dropping column that messes up partiitoning fails +ALTER TABLE alter_test DROP COLUMN colorname; +ERROR: cannot drop column named in partition key +--ONLY blocked +ALTER TABLE ONLY alter_test RENAME COLUMN colorname TO colorname2; +ERROR: inherited column "colorname" must be renamed in child tables too +ALTER TABLE ONLY alter_test ALTER COLUMN colorname TYPE varchar(10); +ERROR: ONLY option not supported on hypertable operations +\set ON_ERROR_STOP 1 +CREATE TABLE 
alter_test_bigint(time bigint, temp float); +SELECT create_hypertable('alter_test_bigint', 'time', chunk_time_interval => 2628000000000); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (10,public,alter_test_bigint,t) +(1 row) + +\set ON_ERROR_STOP 0 +-- Changing type of time dimension to a non-supported type +-- shall not be allowed +ALTER TABLE alter_test_bigint +ALTER COLUMN time TYPE TEXT; +ERROR: cannot change data type of hypertable column "time" from bigint to text +-- dropping open time dimension shall not be allowed. +ALTER TABLE alter_test_bigint +DROP COLUMN time; +ERROR: cannot drop column named in partition key +\set ON_ERROR_STOP 1 +-- test expression index creation where physical layout of chunks differs from hypertable +CREATE TABLE i2504(time timestamp NOT NULL, a int, b int, c int, d int); +select create_hypertable('i2504', 'time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +--------------------- + (11,public,i2504,t) +(1 row) + +INSERT INTO i2504 VALUES (now(), 1, 2, 3, 4); +ALTER TABLE i2504 DROP COLUMN b; +INSERT INTO i2504(time, a, c, d) VALUES +(now() - interval '1 year', 1, 2, 3), +(now() - interval '2 years', 1, 2, 3); +CREATE INDEX idx2 ON i2504(a,d) WHERE c IS NOT NULL; +DROP INDEX idx2; +CREATE INDEX idx2 ON i2504(a,d) WITH (timescaledb.transaction_per_chunk) WHERE c IS NOT NULL; +-- Make sure custom composite types are supported as dimensions +CREATE TYPE TUPLE as (val1 int4, val2 int4); +CREATE TABLE part_custom_dim (time TIMESTAMPTZ, combo TUPLE, device TEXT); +\set ON_ERROR_STOP 0 +-- should fail on PG < 14 because no partitioning function supplied and the given custom type +-- has no default hash function +-- on PG14 custom types are hashable +SELECT create_hypertable('part_custom_dim', 'time', 'combo', 4); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------- + (12,public,part_custom_dim,t) +(1 row) + +\set ON_ERROR_STOP 1 +-- immutable functions with sub-transaction (issue #4489) +CREATE FUNCTION i4489(value TEXT DEFAULT '') RETURNS INTEGER +AS +$$ +BEGIN + RETURN value::INTEGER; +EXCEPTION WHEN invalid_text_representation THEN + RETURN 0; +END; +$$ +LANGUAGE PLPGSQL IMMUTABLE; +-- should return 1 (one) in both cases +SELECT i4489('1'), i4489('1'); + i4489 | i4489 +-------+------- + 1 | 1 +(1 row) + +-- should return 0 (zero) in all cases handled by the exception +SELECT i4489(), i4489(); + i4489 | i4489 +-------+------- + 0 | 0 +(1 row) + +SELECT i4489('a'), i4489('a'); + i4489 | i4489 +-------+------- + 0 | 0 +(1 row) + diff --git a/test/expected/delete-16.out b/test/expected/delete-16.out new file mode 100644 index 00000000000..fd9c4c23b70 --- /dev/null +++ b/test/expected/delete-16.out @@ -0,0 +1,185 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\o /dev/null +\ir include/insert_two_partitions.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE TABLE PUBLIC."two_Partitions" ( + "timeCustom" BIGINT NOT NULL, + device_id TEXT NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL, + series_bool BOOLEAN NULL +); +CREATE INDEX ON PUBLIC."two_Partitions" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_0) WHERE series_0 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_1) WHERE series_1 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_2) WHERE series_2 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_bool) WHERE series_bool IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, device_id); +SELECT * FROM create_hypertable('"public"."two_Partitions"'::regclass, 'timeCustom'::name, 'device_id'::name, associated_schema_name=>'_timescaledb_internal'::text, number_partitions => 2, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); +\set QUIET off +BEGIN; +\COPY public."two_Partitions" FROM 'data/ds1_dev1_1.tsv' NULL AS ''; +COMMIT; +INSERT INTO public."two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257987600000000000, 'dev1', 1.5, 1), +(1257987600000000000, 'dev1', 1.5, 2), +(1257894000000000000, 'dev2', 1.5, 1), +(1257894002000000000, 'dev1', 2.5, 3); +INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257894000000000000, 'dev2', 1.5, 2); +\set QUIET on +\o +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000000000 | dev1 | 1.5 | 1 | 2 | t + 1257894000000000000 | dev1 | 1.5 | 2 | | + 1257894000000000000 | dev2 | 1.5 | 1 | | + 1257894000000000000 | dev2 | 1.5 | 2 | | + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 6 | | t + 1257894002000000000 | dev1 | 5.5 | 7 | | f + 1257897600000000000 | dev1 | 4.5 | 5 | | f + 1257987600000000000 | dev1 | 1.5 | 1 | | + 1257987600000000000 | dev1 | 1.5 | 2 | | +(12 rows) + +DELETE FROM "two_Partitions" WHERE series_0 = 1.5; +DELETE FROM "two_Partitions" WHERE series_0 = 100; +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 6 | | t + 1257894002000000000 | dev1 | 5.5 | 7 | | f + 1257897600000000000 | dev1 | 4.5 | 5 | | f +(6 rows) + +-- Make sure DELETE isn't optimized if it includes Append plans +-- Need to turn of nestloop to make append appear the same on PG96 and PG10 +set enable_nestloop = 'off'; +CREATE OR REPLACE FUNCTION series_val() +RETURNS integer LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN 5; +END; +$BODY$; +-- ConstraintAwareAppend applied for SELECT +EXPLAIN (costs off) +SELECT FROM "two_Partitions" +WHERE series_1 IN (SELECT series_1 FROM "two_Partitions" WHERE series_1 > series_val()); + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------ + Hash Join + Hash Cond: ("two_Partitions".series_1 = "two_Partitions_1".series_1) + -> Custom Scan (ChunkAppend) on "two_Partitions" + Chunks excluded during startup: 0 + -> Index Only Scan using "_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_1_chunk + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_2_chunk + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_3_chunk + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_4_chunk + Index Cond: (series_1 > (series_val())::double precision) + -> Hash + -> HashAggregate + Group Key: "two_Partitions_1".series_1 + -> Custom Scan (ChunkAppend) on "two_Partitions" "two_Partitions_1" + Chunks excluded during startup: 0 + -> Index Only Scan using "_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_1_chunk _hyper_1_1_chunk_1 + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_2_chunk _hyper_1_2_chunk_1 + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_3_chunk _hyper_1_3_chunk_1 + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_4_chunk _hyper_1_4_chunk_1 + Index Cond: (series_1 > (series_val())::double precision) +(25 rows) + +-- ConstraintAwareAppend NOT applied for DELETE +EXPLAIN (costs off) +DELETE FROM "two_Partitions" +WHERE series_1 IN (SELECT series_1 FROM "two_Partitions" WHERE series_1 > series_val()); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on "two_Partitions" + Delete on _hyper_1_1_chunk "two_Partitions_2" + Delete on _hyper_1_2_chunk "two_Partitions_3" + Delete on _hyper_1_3_chunk "two_Partitions_4" + Delete on _hyper_1_4_chunk "two_Partitions_5" + -> Hash Join + Hash Cond: ("two_Partitions".series_1 = "two_Partitions_1".series_1) + -> Append + -> Seq Scan on _hyper_1_1_chunk "two_Partitions_2" + -> Seq Scan on _hyper_1_2_chunk "two_Partitions_3" + -> Seq Scan on _hyper_1_3_chunk "two_Partitions_4" + -> Seq Scan on _hyper_1_4_chunk "two_Partitions_5" + -> Hash + -> HashAggregate + Group Key: "two_Partitions_1".series_1 + -> Append + -> Index Scan using "_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_1_chunk "two_Partitions_6" + Index Cond: (series_1 > (series_val())::double precision) + -> Index Scan using "_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_2_chunk "two_Partitions_7" + Index Cond: (series_1 > (series_val())::double precision) + -> Index Scan using "_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_3_chunk "two_Partitions_8" + Index Cond: (series_1 > (series_val())::double precision) + -> Index Scan using "_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" on _hyper_1_4_chunk 
"two_Partitions_9" + Index Cond: (series_1 > (series_val())::double precision) +(25 rows) + +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 6 | | t + 1257894002000000000 | dev1 | 5.5 | 7 | | f + 1257897600000000000 | dev1 | 4.5 | 5 | | f +(6 rows) + +BEGIN; +DELETE FROM "two_Partitions" +WHERE series_1 IN (SELECT series_1 FROM "two_Partitions" WHERE series_1 > series_val()); +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257897600000000000 | dev1 | 4.5 | 5 | | f +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM "two_Partitions" +WHERE series_1 IN (SELECT series_1 FROM "two_Partitions" WHERE series_1 > series_val()) RETURNING "timeCustom"; + timeCustom +--------------------- + 1257894002000000000 + 1257894002000000000 +(2 rows) + +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257897600000000000 | dev1 | 4.5 | 5 | | f +(4 rows) + +ROLLBACK; diff --git a/test/expected/parallel-16.out b/test/expected/parallel-16.out new file mode 100644 index 00000000000..34f0e848a3e --- /dev/null +++ b/test/expected/parallel-16.out @@ -0,0 +1,754 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +--parallel queries require big-ish tables so collect them all here +--so that we need to generate queries only once. 
+-- output with analyze is not stable because it depends on worker assignment +\set PREFIX 'EXPLAIN (costs off)' +\set CHUNK1 _timescaledb_internal._hyper_1_1_chunk +\set CHUNK2 _timescaledb_internal._hyper_1_2_chunk +CREATE TABLE test (i int, j double precision, ts timestamp); +SELECT create_hypertable('test','i',chunk_time_interval:=500000); +WARNING: column type "timestamp without time zone" used for "ts" does not follow best practices +NOTICE: adding not-null constraint to column "i" + create_hypertable +------------------- + (1,public,test,t) +(1 row) + +INSERT INTO test SELECT x, x+0.1, _timescaledb_functions.to_timestamp(x*1000) FROM generate_series(0,1000000-1,10) AS x; +ANALYZE test; +ALTER TABLE :CHUNK1 SET (parallel_workers=2); +ALTER TABLE :CHUNK2 SET (parallel_workers=2); +SET work_mem TO '50MB'; +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false); + set_config +------------ + on +(1 row) + +SET max_parallel_workers_per_gather = 4; +SET parallel_setup_cost TO 0; +EXPLAIN (costs off) SELECT first(i, j) FROM "test"; + QUERY PLAN +--------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_1_chunk + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_2_chunk +(8 rows) + +SELECT first(i, j) FROM "test"; + first +------- + 0 +(1 row) + +EXPLAIN (costs off) SELECT last(i, j) FROM "test"; + QUERY PLAN +--------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_1_chunk + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_2_chunk +(8 rows) + +SELECT last(i, j) FROM "test"; + last +-------- + 999990 +(1 row) + +EXPLAIN (costs off) SELECT time_bucket('1 second', ts) sec, last(i, j) +FROM "test" +GROUP BY sec +ORDER BY sec +LIMIT 5; + QUERY PLAN +-------------------------------------------------------------------------------------- + Gather + Workers Planned: 1 + Single Copy: true + -> Limit + -> Sort + Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts)) + -> HashAggregate + Group Key: time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts) + -> Result + -> Append + -> Seq Scan on _hyper_1_1_chunk + -> Seq Scan on _hyper_1_2_chunk +(12 rows) + +-- test single copy parallel plan with parallel chunk append +:PREFIX SELECT time_bucket('1 second', ts) sec, last(i, j) +FROM "test" +WHERE length(version()) > 0 +GROUP BY sec +ORDER BY sec +LIMIT 5; + QUERY PLAN +-------------------------------------------------------------------------------------- + Gather + Workers Planned: 1 + Single Copy: true + -> Limit + -> Sort + Sort Key: (time_bucket('@ 1 sec'::interval, test.ts)) + -> HashAggregate + Group Key: time_bucket('@ 1 sec'::interval, test.ts) + -> Result + -> Result + One-Time Filter: (length(version()) > 0) + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Result + One-Time Filter: (length(version()) > 0) + -> Seq Scan on _hyper_1_1_chunk + -> Result + One-Time Filter: (length(version()) > 0) + -> Seq Scan on _hyper_1_2_chunk +(19 rows) + +SELECT time_bucket('1 second', ts) sec, last(i, j) +FROM "test" +GROUP BY sec +ORDER BY sec +LIMIT 5; + sec | last +--------------------------+------ + Wed Dec 31 16:00:00 1969 | 990 + Wed Dec 31 16:00:01 1969 | 1990 + Wed Dec 31 16:00:02 
1969 | 2990 + Wed Dec 31 16:00:03 1969 | 3990 + Wed Dec 31 16:00:04 1969 | 4990 +(5 rows) + +--test variants of histogram +EXPLAIN (costs off) SELECT histogram(i, 1, 1000000, 2) FROM "test"; + QUERY PLAN +--------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_1_chunk + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_2_chunk +(8 rows) + +SELECT histogram(i, 1, 1000000, 2) FROM "test"; + histogram +------------------- + {1,50000,49999,0} +(1 row) + +EXPLAIN (costs off) SELECT histogram(i, 1,1000001,10) FROM "test"; + QUERY PLAN +--------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_1_chunk + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_2_chunk +(8 rows) + +SELECT histogram(i, 1, 1000001, 10) FROM "test"; + histogram +------------------------------------------------------------------ + {1,10000,10000,10000,10000,10000,10000,10000,10000,10000,9999,0} +(1 row) + +EXPLAIN (costs off) SELECT histogram(i, 0,100000,5) FROM "test"; + QUERY PLAN +--------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_1_chunk + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_2_chunk +(8 rows) + +SELECT histogram(i, 0, 100000, 5) FROM "test"; + histogram +------------------------------------ + {0,2000,2000,2000,2000,2000,90000} +(1 row) + +EXPLAIN (costs off) SELECT histogram(i, 10,100000,5) FROM "test"; + QUERY PLAN +--------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_1_chunk + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_2_chunk +(8 rows) + +SELECT histogram(i, 10, 100000, 5) FROM "test"; + histogram +------------------------------------ + {1,2000,2000,2000,2000,1999,90000} +(1 row) + +EXPLAIN (costs off) SELECT histogram(NULL, 10,100000,5) FROM "test" WHERE i = coalesce(-1,j); + QUERY PLAN +------------------------------------------------------------------------------------ + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: ((i)::double precision = '-1'::double precision) + -> Partial Aggregate + -> Parallel Seq Scan on _hyper_1_2_chunk + Filter: ((i)::double precision = '-1'::double precision) +(10 rows) + +SELECT histogram(NULL, 10,100000,5) FROM "test" WHERE i = coalesce(-1,j); + histogram +----------- + +(1 row) + +-- test parallel ChunkAppend +:PREFIX SELECT i FROM "test" WHERE length(version()) > 0; + QUERY PLAN +-------------------------------------------------------------- + Gather + Workers Planned: 1 + Single Copy: true + -> Result + One-Time Filter: (length(version()) > 0) + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Result + One-Time Filter: (length(version()) > 0) + -> Seq Scan on _hyper_1_1_chunk + -> Result + One-Time Filter: (length(version()) > 0) + -> Seq Scan on _hyper_1_2_chunk +(13 rows) + +:PREFIX SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; + QUERY PLAN +--------------------------------------------------------------------------- + Finalize 
Aggregate + -> Gather + Workers Planned: 2 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (i > 1) + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_2_chunk + Filter: (i > 1) +(17 rows) + +SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; + count +------- + 99999 +(1 row) + +-- test parallel ChunkAppend with only work done in the parallel workers +SET parallel_leader_participation = off; +SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; + count +------- + 99999 +(1 row) + +RESET parallel_leader_participation; +-- Test parallel chunk append is used +SET parallel_tuple_cost = 0; +:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: test.i, _hyper_1_1_chunk.i + -> Hash Right Join + Hash Cond: (_hyper_1_1_chunk.i = test.i) + -> Limit + -> Gather + Workers Planned: 2 + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (i < 500000) + -> Hash + -> Limit + -> Gather + Workers Planned: 2 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_2_chunk +(23 rows) + +SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; + i | j | ts | i | j | ts +----+------+-----------------------------+----+------+----------------------------- + 0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969 + 10 | 10.1 | Wed Dec 31 16:00:00.01 1969 | 10 | 10.1 | Wed Dec 31 16:00:00.01 1969 + 20 | 20.1 | Wed Dec 31 16:00:00.02 1969 | 20 | 20.1 | Wed Dec 31 16:00:00.02 1969 + 30 | 30.1 | Wed Dec 31 16:00:00.03 1969 | 30 | 30.1 | Wed Dec 31 16:00:00.03 1969 + 40 | 40.1 | Wed Dec 31 16:00:00.04 1969 | 40 | 40.1 | Wed Dec 31 16:00:00.04 1969 + 50 | 50.1 | Wed Dec 31 16:00:00.05 1969 | 50 | 50.1 | Wed Dec 31 16:00:00.05 1969 + 60 | 60.1 | Wed Dec 31 16:00:00.06 1969 | 60 | 60.1 | Wed Dec 31 16:00:00.06 1969 + 70 | 70.1 | Wed Dec 31 16:00:00.07 1969 | 70 | 70.1 | Wed Dec 31 16:00:00.07 1969 + 80 | 80.1 | Wed Dec 31 16:00:00.08 1969 | 80 | 80.1 | Wed Dec 31 16:00:00.08 1969 + 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 +(10 rows) + +-- Test normal chunk append can be used in a parallel worker +:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Gather + Workers Planned: 1 + Single Copy: true + -> Limit + -> Incremental Sort + Sort Key: _hyper_1_2_chunk.i, test.i + Presorted Key: _hyper_1_2_chunk.i + -> 
Nested Loop + -> Index Scan Backward using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk + Index Cond: (i >= 999000) + -> Materialize + -> Custom Scan (ChunkAppend) on test + Order: test.i + -> Index Scan Backward using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk + Index Cond: (i >= 400000) + -> Index Scan Backward using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 + Index Cond: (i >= 400000) +(17 rows) + +SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10; + i | j | ts | i | j | ts +--------+----------+--------------------------+--------+----------+----------------------------- + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400000 | 400000.1 | Wed Dec 31 16:06:40 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400010 | 400010.1 | Wed Dec 31 16:06:40.01 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400020 | 400020.1 | Wed Dec 31 16:06:40.02 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400030 | 400030.1 | Wed Dec 31 16:06:40.03 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400040 | 400040.1 | Wed Dec 31 16:06:40.04 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400050 | 400050.1 | Wed Dec 31 16:06:40.05 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400060 | 400060.1 | Wed Dec 31 16:06:40.06 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400070 | 400070.1 | Wed Dec 31 16:06:40.07 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400080 | 400080.1 | Wed Dec 31 16:06:40.08 1969 + 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400090 | 400090.1 | Wed Dec 31 16:06:40.09 1969 +(10 rows) + +-- Test parallel ChunkAppend reinit +SET enable_material = off; +SET min_parallel_table_scan_size = 0; +SET min_parallel_index_scan_size = 0; +SET enable_hashjoin = 'off'; +SET enable_nestloop = 'off'; +CREATE TABLE sensor_data( + time timestamptz NOT NULL, + sensor_id integer NOT NULL); +SELECT FROM create_hypertable(relation=>'sensor_data', time_column_name=> 'time'); +-- +(1 row) + +-- Sensors 1 and 2 +INSERT INTO sensor_data +SELECT time, sensor_id +FROM +generate_series('2000-01-01 00:00:30', '2022-01-01 00:00:30', INTERVAL '3 months') AS g1(time), +generate_series(1, 2, 1) AS g2(sensor_id) +ORDER BY time; +-- Sensor 100 +INSERT INTO sensor_data +SELECT time, 100 as sensor_id +FROM +generate_series('2000-01-01 00:00:30', '2022-01-01 00:00:30', INTERVAL '1 year') AS g1(time) +ORDER BY time; +:PREFIX SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > '2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Gather Merge + Workers Planned: 4 + -> Sort + Sort Key: s2."time", s1."time", s1.sensor_id, s2.sensor_id + -> Nested Loop + -> Parallel Custom Scan (ChunkAppend) on sensor_data s1 + Chunks excluded during startup: 80 + -> Parallel Bitmap Heap Scan on _hyper_2_83_chunk s1_1 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_83_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel 
Bitmap Heap Scan on _hyper_2_84_chunk s1_2 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_84_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel Bitmap Heap Scan on _hyper_2_85_chunk s1_3 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_85_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel Bitmap Heap Scan on _hyper_2_86_chunk s1_4 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_86_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel Bitmap Heap Scan on _hyper_2_87_chunk s1_5 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_87_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel Bitmap Heap Scan on _hyper_2_88_chunk s1_6 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_88_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel Bitmap Heap Scan on _hyper_2_89_chunk s1_7 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_89_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel Bitmap Heap Scan on _hyper_2_90_chunk s1_8 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_90_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Parallel Bitmap Heap Scan on _hyper_2_91_chunk s1_9 + Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + Filter: (sensor_id > 50) + -> Bitmap Index Scan on _hyper_2_91_chunk_sensor_data_time_idx + Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone) + -> Custom Scan (ChunkAppend) on sensor_data s2 + Order: s2."time" + -> Index Scan Backward using _hyper_2_83_chunk_sensor_data_time_idx on _hyper_2_83_chunk s2_1 + Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone)) + -> Index Scan Backward using _hyper_2_84_chunk_sensor_data_time_idx on _hyper_2_84_chunk s2_2 + Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone)) + -> Index Scan Backward using _hyper_2_85_chunk_sensor_data_time_idx on _hyper_2_85_chunk s2_3 + Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone)) + -> Index Scan Backward using _hyper_2_86_chunk_sensor_data_time_idx on _hyper_2_86_chunk s2_4 + Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 
'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone)) + -> Index Scan Backward using _hyper_2_87_chunk_sensor_data_time_idx on _hyper_2_87_chunk s2_5 + Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone)) +(64 rows) + +-- Check query result +SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > '2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id; + time | sensor_id | time | sensor_id +------------------------------+-----------+------------------------------+----------- + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 2 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 2 + Fri Jan 01 00:00:30 2021 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 2 +(12 rows) + +-- Ensure the same result is produced if only the parallel workers have to produce them (i.e., the pstate is reinitialized properly) +SET parallel_leader_participation = off; +SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > '2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id; + time | sensor_id | time | sensor_id +------------------------------+-----------+------------------------------+----------- + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 2 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 2 + Fri Jan 01 00:00:30 2021 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 2 +(12 rows) + +RESET parallel_leader_participation; +-- Ensure the same query result is produced by a sequencial query +SET max_parallel_workers_per_gather TO 0; +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'off', false); + set_config +------------ + off +(1 row) + +SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > 
'2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id; + time | sensor_id | time | sensor_id +------------------------------+-----------+------------------------------+----------- + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Apr 01 00:00:30 2020 PDT | 2 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Wed Jul 01 00:00:30 2020 PDT | 2 + Fri Jan 01 00:00:30 2021 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 1 + Fri Jan 01 00:00:30 2021 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 2 + Sat Jan 01 00:00:30 2022 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 1 + Sat Jan 01 00:00:30 2022 PST | 100 | Thu Oct 01 00:00:30 2020 PDT | 2 +(12 rows) + +RESET enable_material; +RESET min_parallel_table_scan_size; +RESET min_parallel_index_scan_size; +RESET enable_hashjoin; +RESET enable_nestloop; +RESET parallel_tuple_cost; +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false); + set_config +------------ + on +(1 row) + +-- test worker assignment +-- first chunk should have 1 worker and second chunk should have 2 +SET max_parallel_workers_per_gather TO 2; +:PREFIX SELECT count(*) FROM "test" WHERE i >= 400000 AND length(version()) > 0; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Index Only Scan using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk + Index Cond: (i >= 400000) + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_2_chunk + Filter: (i >= 400000) +(17 rows) + +SELECT count(*) FROM "test" WHERE i >= 400000 AND length(version()) > 0; + count +------- + 60000 +(1 row) + +-- test worker assignment +-- first chunk should have 2 worker and second chunk should have 1 +:PREFIX SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Index Only Scan using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk + Index Cond: (i < 600000) + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (i < 600000) +(17 rows) + +SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0; + count +------- + 60000 +(1 row) + +-- test ChunkAppend with # workers < # childs +SET max_parallel_workers_per_gather TO 1; 
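With only one worker allowed per gather there are fewer workers than append children, so the planner has to spread a single worker across both chunks in the plan that follows. To confirm the per-chunk parallel_workers hints set earlier in this file, a hypothetical catalog query (not part of the test itself) would be:

-- list the storage parameters of the two chunks of hypertable "test"
SELECT c.relname, c.reloptions
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = '_timescaledb_internal'
  AND c.relname IN ('_hyper_1_1_chunk', '_hyper_1_2_chunk');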
+:PREFIX SELECT count(*) FROM "test" WHERE length(version()) > 0; + QUERY PLAN +--------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 1 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_1_chunk + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_2_chunk +(15 rows) + +SELECT count(*) FROM "test" WHERE length(version()) > 0; + count +-------- + 100000 +(1 row) + +-- test ChunkAppend with # workers > # childs +SET max_parallel_workers_per_gather TO 2; +:PREFIX SELECT count(*) FROM "test" WHERE i >= 500000 AND length(version()) > 0; + QUERY PLAN +--------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_2_chunk + Filter: (i >= 500000) +(12 rows) + +SELECT count(*) FROM "test" WHERE i >= 500000 AND length(version()) > 0; + count +------- + 50000 +(1 row) + +RESET max_parallel_workers_per_gather; +-- test partial and non-partial plans +-- these will not be parallel on PG < 11 +ALTER TABLE :CHUNK1 SET (parallel_workers=0); +ALTER TABLE :CHUNK2 SET (parallel_workers=2); +:PREFIX SELECT count(*) FROM "test" WHERE i > 400000 AND length(version()) > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Index Only Scan using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk + Index Cond: (i > 400000) + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_2_chunk + Filter: (i > 400000) +(17 rows) + +ALTER TABLE :CHUNK1 SET (parallel_workers=2); +ALTER TABLE :CHUNK2 SET (parallel_workers=0); +:PREFIX SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Index Only Scan using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk + Index Cond: (i < 600000) + -> Partial Aggregate + -> Result + One-Time Filter: (length(version()) > 0) + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (i < 600000) +(17 rows) + +ALTER TABLE :CHUNK1 RESET (parallel_workers); +ALTER TABLE :CHUNK2 RESET (parallel_workers); +-- now() is not marked parallel safe in PostgreSQL < 12 so using now() +-- in a query will prevent parallelism but CURRENT_TIMESTAMP and +-- transaction_timestamp() are marked parallel safe +:PREFIX SELECT i FROM "test" WHERE ts < CURRENT_TIMESTAMP; + QUERY 
PLAN +------------------------------------------------ + Gather + Workers Planned: 1 + Single Copy: true + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_1_1_chunk + Filter: (ts < CURRENT_TIMESTAMP) + -> Seq Scan on _hyper_1_2_chunk + Filter: (ts < CURRENT_TIMESTAMP) +(9 rows) + +:PREFIX SELECT i FROM "test" WHERE ts < transaction_timestamp(); + QUERY PLAN +------------------------------------------------------ + Gather + Workers Planned: 1 + Single Copy: true + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_1_1_chunk + Filter: (ts < transaction_timestamp()) + -> Seq Scan on _hyper_1_2_chunk + Filter: (ts < transaction_timestamp()) +(9 rows) + +-- this won't be parallel query because now() is parallel restricted in PG < 12 +:PREFIX SELECT i FROM "test" WHERE ts < now(); + QUERY PLAN +------------------------------------------- + Gather + Workers Planned: 1 + Single Copy: true + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_1_1_chunk + Filter: (ts < now()) + -> Seq Scan on _hyper_1_2_chunk + Filter: (ts < now()) +(9 rows) + diff --git a/test/expected/plan_hashagg-16.out b/test/expected/plan_hashagg-16.out new file mode 100644 index 00000000000..8d1c9ec5518 --- /dev/null +++ b/test/expected/plan_hashagg-16.out @@ -0,0 +1,330 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set PREFIX 'EXPLAIN (costs off) ' +\ir include/plan_hashagg_load.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE TABLE metric (id SERIAL PRIMARY KEY, value INT); +CREATE TABLE hyper(time TIMESTAMP NOT NULL, time_int BIGINT, time_broken DATE, metricid int, value double precision); +CREATE TABLE regular(time TIMESTAMP NOT NULL, time_int BIGINT, time_date DATE, metricid int, value double precision); +SELECT create_hypertable('hyper', 'time', chunk_time_interval => interval '20 day', create_default_indexes=>FALSE); +psql:include/plan_hashagg_load.sql:9: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +-------------------- + (1,public,hyper,t) +(1 row) + +ALTER TABLE hyper +DROP COLUMN time_broken, +ADD COLUMN time_date DATE; +INSERT INTO metric(value) SELECT random()*100 FROM generate_series(0,10); +INSERT INTO hyper SELECT t, EXTRACT(EPOCH FROM t), (EXTRACT(EPOCH FROM t)::int % 10)+1, 1.0, t::date FROM generate_series('2001-01-01', '2001-01-10', INTERVAL '1 second') t; +INSERT INTO regular(time, time_int, time_date, metricid, value) + SELECT t, EXTRACT(EPOCH FROM t), t::date, (EXTRACT(EPOCH FROM t)::int % 10) + 1, 1.0 FROM generate_series('2001-01-01', '2001-01-02', INTERVAL '1 second') t; +--test some queries before analyze; +EXPLAIN (costs off) SELECT time_bucket('1 minute', time) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")) + -> Gather Merge + Workers Planned: 2 + -> Partial GroupAggregate + Group Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")) + -> Sort + Sort Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")) DESC + -> Result + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(11 rows) + +EXPLAIN (costs off) SELECT date_trunc('minute', time) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) + -> Gather Merge + Workers Planned: 2 + -> Partial GroupAggregate + Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) + -> Sort + Sort Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) DESC + -> Result + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(11 rows) + +-- Test partitioning function on an open (time) dimension +CREATE OR REPLACE FUNCTION unix_to_timestamp(unixtime float8) + RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT to_timestamp(unixtime); +$BODY$; +CREATE TABLE hyper_timefunc(time float8 NOT NULL, metricid int, VALUE double precision, time_date DATE); +SELECT create_hypertable('hyper_timefunc', 'time', chunk_time_interval => interval 
'20 day', create_default_indexes=>FALSE, time_partitioning_func => 'unix_to_timestamp'); + create_hypertable +----------------------------- + (2,public,hyper_timefunc,t) +(1 row) + +INSERT INTO hyper_timefunc SELECT time_int, metricid, VALUE, time_date FROM hyper; +ANALYZE metric; +ANALYZE hyper; +ANALYZE regular; +ANALYZE hyper_timefunc; +\ir include/plan_hashagg_query.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +:PREFIX SELECT time_bucket('1 minute', time) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")) DESC + -> HashAggregate + Group Key: time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time") + -> Result + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(7 rows) + +:PREFIX SELECT time_bucket('1 hour', time) AS MetricMinuteTs, metricid, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs, metricid +ORDER BY MetricMinuteTs DESC, metricid; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (time_bucket('@ 1 hour'::interval, _hyper_1_1_chunk."time")) DESC, _hyper_1_1_chunk.metricid + -> HashAggregate + Group Key: time_bucket('@ 1 hour'::interval, _hyper_1_1_chunk."time"), _hyper_1_1_chunk.metricid + -> Result + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(7 rows) + +--should be too many groups will not hashaggregate +:PREFIX SELECT time_bucket('1 second', time) AS MetricMinuteTs, metricid, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs, metricid +ORDER BY MetricMinuteTs DESC, metricid; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk."time")), _hyper_1_1_chunk.metricid + -> Gather Merge + Workers Planned: 2 + -> Partial GroupAggregate + Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk."time")), _hyper_1_1_chunk.metricid + -> Sort + Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk."time")) DESC, _hyper_1_1_chunk.metricid + -> Result + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(11 rows) + +:PREFIX SELECT time_bucket('1 minute', time, INTERVAL '30 seconds') AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP 
BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time", '@ 30 secs'::interval)) DESC + -> HashAggregate + Group Key: time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time", '@ 30 secs'::interval) + -> Result + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(7 rows) + +:PREFIX SELECT time_bucket(60, time_int) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (time_bucket('60'::bigint, _hyper_1_1_chunk.time_int)) DESC + -> HashAggregate + Group Key: time_bucket('60'::bigint, _hyper_1_1_chunk.time_int) + -> Result + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(7 rows) + +:PREFIX SELECT time_bucket(60, time_int, 10) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (time_bucket('60'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint)) DESC + -> HashAggregate + Group Key: time_bucket('60'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint) + -> Result + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(7 rows) + +:PREFIX SELECT time_bucket('1 day', time_date) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.time_date)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.time_date)) DESC + -> Partial HashAggregate + Group Key: time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.time_date) + -> Result + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(11 rows) + +:PREFIX SELECT date_trunc('minute', time) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) DESC + -> HashAggregate + Group Key: date_trunc('minute'::text, _hyper_1_1_chunk."time") + -> Result + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(7 rows) + +\set ON_ERROR_STOP 0 +--can't optimize invalid time unit +:PREFIX SELECT date_trunc('invalid', time) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: (date_trunc('invalid'::text, _hyper_1_1_chunk."time")) + -> Gather Merge + Workers Planned: 2 + -> Partial GroupAggregate + Group Key: (date_trunc('invalid'::text, _hyper_1_1_chunk."time")) + -> Sort + Sort Key: (date_trunc('invalid'::text, _hyper_1_1_chunk."time")) DESC + -> Result + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(11 rows) + +\set ON_ERROR_STOP 1 +:PREFIX SELECT date_trunc('day', time_date) AS MetricMinuteTs, AVG(value) as avg +FROM hyper +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: (date_trunc('day'::text, (_hyper_1_1_chunk.time_date)::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (date_trunc('day'::text, (_hyper_1_1_chunk.time_date)::timestamp with time zone)) DESC + -> Partial HashAggregate + Group Key: date_trunc('day'::text, (_hyper_1_1_chunk.time_date)::timestamp with time zone) + -> Result + -> Parallel Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) +(11 rows) + +--joins +--with hypertable, optimize +:PREFIX SELECT time_bucket(3600, time_int, 10) AS MetricMinuteTs, metric.value, AVG(hyper.value) as avg +FROM hyper +JOIN metric ON (hyper.metricid = metric.id) +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs, metric.id +ORDER BY MetricMinuteTs DESC, metric.id; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (time_bucket('3600'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint)) DESC, metric.id + -> HashAggregate + Group Key: time_bucket('3600'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint), metric.id + -> Hash Join + Hash Cond: (_hyper_1_1_chunk.metricid = metric.id) + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 
'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) + -> Hash + -> Seq Scan on metric +(10 rows) + +--no hypertable involved, no optimization +:PREFIX SELECT time_bucket(3600, time_int, 10) AS MetricMinuteTs, metric.value, AVG(regular.value) as avg +FROM regular +JOIN metric ON (regular.metricid = metric.id) +WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs, metric.id +ORDER BY MetricMinuteTs DESC, metric.id; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Group Key: (time_bucket('3600'::bigint, regular.time_int, '10'::bigint)), metric.id + -> Sort + Sort Key: (time_bucket('3600'::bigint, regular.time_int, '10'::bigint)) DESC, metric.id + -> Nested Loop + Join Filter: (metric.id = regular.metricid) + -> Seq Scan on regular + Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone)) + -> Seq Scan on metric +(9 rows) + +-- Try with time partitioning function. Currently not optimized for hash aggregates +:PREFIX SELECT time_bucket('1 minute', unix_to_timestamp(time)) AS MetricMinuteTs, AVG(value) as avg +FROM hyper_timefunc +WHERE unix_to_timestamp(time) >= '2001-01-04T00:00:00' AND unix_to_timestamp(time) <= '2001-01-05T01:00:00' +GROUP BY MetricMinuteTs +ORDER BY MetricMinuteTs DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Group Key: (time_bucket('@ 1 min'::interval, to_timestamp(_hyper_2_2_chunk."time"))) + -> Sort + Sort Key: (time_bucket('@ 1 min'::interval, to_timestamp(_hyper_2_2_chunk."time"))) DESC + -> Result + -> Seq Scan on _hyper_2_2_chunk + Filter: ((to_timestamp("time") >= 'Thu Jan 04 00:00:00 2001 PST'::timestamp with time zone) AND (to_timestamp("time") <= 'Fri Jan 05 01:00:00 2001 PST'::timestamp with time zone)) +(7 rows) + +\set ECHO none +psql:include/plan_hashagg_query.sql:60: ERROR: unit "invalid" not recognized for type timestamp without time zone +psql:include/plan_hashagg_query.sql:60: ERROR: unit "invalid" not recognized for type timestamp without time zone diff --git a/test/expected/query-16.out b/test/expected/query-16.out new file mode 100644 index 00000000000..418da001ea4 --- /dev/null +++ b/test/expected/query-16.out @@ -0,0 +1,401 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set TEST_BASE_NAME query +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +\set PREFIX 'EXPLAIN (costs OFF)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE PUBLIC.hyper_1 ( + time TIMESTAMP NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL +); +CREATE INDEX "time_plain" ON PUBLIC.hyper_1 (time DESC, series_0); +SELECT * FROM create_hypertable('"public"."hyper_1"'::regclass, 'time'::name, number_partitions => 1, create_default_indexes=>false); +psql:include/query_load.sql:13: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | hyper_1 | t +(1 row) + +INSERT INTO hyper_1 SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser; +INSERT INTO hyper_1 SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser; +CREATE TABLE PUBLIC.hyper_1_tz ( + time TIMESTAMPTZ NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL +); +CREATE INDEX "time_plain_tz" ON PUBLIC.hyper_1_tz (time DESC, series_0); +SELECT * FROM create_hypertable('"public"."hyper_1_tz"'::regclass, 'time'::name, number_partitions => 1, create_default_indexes=>false); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 2 | public | hyper_1_tz | t +(1 row) + +INSERT INTO hyper_1_tz SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser; +INSERT INTO hyper_1_tz SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser; +CREATE TABLE PUBLIC.hyper_1_int ( + time int NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL +); +CREATE INDEX "time_plain_int" ON PUBLIC.hyper_1_int (time DESC, series_0); +SELECT * FROM create_hypertable('"public"."hyper_1_int"'::regclass, 'time'::name, number_partitions => 1, chunk_time_interval=>10000, create_default_indexes=>FALSE); + hypertable_id | schema_name | table_name | created +---------------+-------------+-------------+--------- + 3 | public | hyper_1_int | t +(1 row) + +INSERT INTO hyper_1_int SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser; +INSERT INTO hyper_1_int SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser; +CREATE TABLE PUBLIC.hyper_1_date ( + time date NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL +); +CREATE INDEX "time_plain_date" ON PUBLIC.hyper_1_date (time DESC, series_0); +SELECT * FROM create_hypertable('"public"."hyper_1_date"'::regclass, 'time'::name, number_partitions => 1, chunk_time_interval=>86400000000, create_default_indexes=>FALSE); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 4 | public | hyper_1_date | t +(1 row) + +INSERT INTO hyper_1_date SELECT to_timestamp(ser)::date, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser; +INSERT INTO hyper_1_date SELECT to_timestamp(ser)::date, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser; +--below needed to create enough unique dates to trigger an index scan +INSERT INTO hyper_1_date SELECT to_timestamp(ser*100)::date, ser, ser+10000, sqrt(ser::numeric) FROM 
generate_series(10001,20000) ser; +CREATE TABLE PUBLIC.plain_table ( + time TIMESTAMPTZ NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL +); +CREATE INDEX "time_plain_plain_table" ON PUBLIC.plain_table (time DESC, series_0); +INSERT INTO plain_table SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser; +INSERT INTO plain_table SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser; +-- Table with a time partitioning function +CREATE TABLE PUBLIC.hyper_timefunc ( + time float8 NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL +); +CREATE OR REPLACE FUNCTION unix_to_timestamp(unixtime float8) + RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT to_timestamp(unixtime); +$BODY$; +CREATE INDEX "time_plain_timefunc" ON PUBLIC.hyper_timefunc (to_timestamp(time) DESC, series_0); +SELECT * FROM create_hypertable('"public"."hyper_timefunc"'::regclass, 'time'::name, number_partitions => 1, create_default_indexes=>false, time_partitioning_func => 'unix_to_timestamp'); + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------+--------- + 5 | public | hyper_timefunc | t +(1 row) + +INSERT INTO hyper_timefunc SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser; +INSERT INTO hyper_timefunc SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser; +ANALYZE plain_table; +ANALYZE hyper_timefunc; +ANALYZE hyper_1; +ANALYZE hyper_1_tz; +ANALYZE hyper_1_int; +ANALYZE hyper_1_date; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+SHOW timescaledb.enable_optimizations; + timescaledb.enable_optimizations +---------------------------------- + on +(1 row) + +--non-aggregates use MergeAppend in both optimized and non-optimized +:PREFIX SELECT * FROM hyper_1 ORDER BY "time" DESC limit 2; + QUERY PLAN +------------------------------------------------------------------------ + Limit + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(2 rows) + +:PREFIX SELECT * FROM hyper_timefunc ORDER BY unix_to_timestamp("time") DESC limit 2; + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + -> Index Scan using _hyper_5_19_chunk_time_plain_timefunc on _hyper_5_19_chunk +(2 rows) + +--Aggregates use MergeAppend only in optimized +:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +------------------------------------------------------------------------------------ + Limit + -> GroupAggregate + Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) + -> Result + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(5 rows) + +:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1_date GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (date_trunc('minute'::text, (_hyper_4_6_chunk."time")::timestamp with time zone)) + -> Result + -> Merge Append + Sort Key: (date_trunc('minute'::text, (_hyper_4_6_chunk."time")::timestamp with time zone)) DESC + -> Index Scan using _hyper_4_6_chunk_time_plain_date on _hyper_4_6_chunk + -> Index Scan using _hyper_4_7_chunk_time_plain_date on _hyper_4_7_chunk + -> Index Scan using _hyper_4_8_chunk_time_plain_date on _hyper_4_8_chunk + -> Index Scan using _hyper_4_9_chunk_time_plain_date on _hyper_4_9_chunk + -> Index Scan using _hyper_4_10_chunk_time_plain_date on _hyper_4_10_chunk + -> Index Scan using _hyper_4_11_chunk_time_plain_date on _hyper_4_11_chunk + -> Index Scan using _hyper_4_12_chunk_time_plain_date on _hyper_4_12_chunk + -> Index Scan using _hyper_4_13_chunk_time_plain_date on _hyper_4_13_chunk + -> Index Scan using _hyper_4_14_chunk_time_plain_date on _hyper_4_14_chunk + -> Index Scan using _hyper_4_15_chunk_time_plain_date on _hyper_4_15_chunk + -> Index Scan using _hyper_4_16_chunk_time_plain_date on _hyper_4_16_chunk + -> Index Scan using _hyper_4_17_chunk_time_plain_date on _hyper_4_17_chunk + -> Index Scan using _hyper_4_18_chunk_time_plain_date on _hyper_4_18_chunk +(19 rows) + +--the minute and second results should be diff +:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +------------------------------------------------------------------------------------ + Limit + -> GroupAggregate + Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) + -> Result + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(5 rows) + +:PREFIX SELECT date_trunc('second', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +------------------------------------------------------------------------------------ + Limit + -> GroupAggregate + Group Key: (date_trunc('second'::text, _hyper_1_1_chunk."time")) + -> Result + 
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(5 rows) + +--test that when index on time used by constraint, still works correctly +:PREFIX +SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) +FROM hyper_1 +WHERE time < to_timestamp(900) +GROUP BY t +ORDER BY t DESC +LIMIT 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (date_trunc('minute'::text, hyper_1."time")) + -> Result + -> Custom Scan (ChunkAppend) on hyper_1 + Order: date_trunc('minute'::text, hyper_1."time") DESC + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk + Index Cond: ("time" < 'Wed Dec 31 16:15:00 1969 PST'::timestamp with time zone) +(9 rows) + +--test on table with time partitioning function. Currently not +--optimized to use index for ordering since the index is an expression +--on time (e.g., timefunc(time)), and we currently don't handle that +--case. +:PREFIX +SELECT date_trunc('minute', to_timestamp(time)) t, avg(series_0), min(series_1), avg(series_2) +FROM hyper_timefunc +WHERE to_timestamp(time) < to_timestamp(900) +GROUP BY t +ORDER BY t DESC +LIMIT 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (date_trunc('minute'::text, to_timestamp(_hyper_5_19_chunk."time"))) DESC + -> HashAggregate + Group Key: date_trunc('minute'::text, to_timestamp(_hyper_5_19_chunk."time")) + -> Result + -> Index Scan using _hyper_5_19_chunk_time_plain_timefunc on _hyper_5_19_chunk + Index Cond: (to_timestamp("time") < 'Wed Dec 31 16:15:00 1969 PST'::timestamp with time zone) +(8 rows) + +BEGIN; + --test that still works with an expression index on data_trunc. 
+ DROP INDEX "time_plain"; + CREATE INDEX "time_trunc" ON PUBLIC.hyper_1 (date_trunc('minute', time)); + ANALYZE hyper_1; + :PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) + -> Result + -> Index Scan Backward using _hyper_1_1_chunk_time_trunc on _hyper_1_1_chunk +(5 rows) + + --test that works with both indexes + CREATE INDEX "time_plain" ON PUBLIC.hyper_1 (time DESC, series_0); + ANALYZE hyper_1; + :PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) + -> Result + -> Index Scan Backward using _hyper_1_1_chunk_time_trunc on _hyper_1_1_chunk +(5 rows) + + :PREFIX SELECT time_bucket('1 minute', time) t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric, 5) + FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +------------------------------------------------------------------------------------ + Limit + -> GroupAggregate + Group Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")) + -> Result + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(5 rows) + + :PREFIX SELECT time_bucket('1 minute', time, INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5) + FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time", '@ 30 secs'::interval)) + -> Result + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(5 rows) + + :PREFIX SELECT time_bucket('1 minute', time - INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5) + FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (time_bucket('@ 1 min'::interval, (_hyper_1_1_chunk."time" - '@ 30 secs'::interval))) + -> Result + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(5 rows) + + :PREFIX SELECT time_bucket('1 minute', time - INTERVAL '30 seconds') + INTERVAL '30 seconds' t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5) + FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Limit + -> GroupAggregate + Group Key: ((time_bucket('@ 1 min'::interval, (_hyper_1_1_chunk."time" - '@ 30 secs'::interval)) + '@ 30 secs'::interval)) + -> Result + -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk +(5 rows) + + :PREFIX SELECT time_bucket('1 minute', time) t, avg(series_0), min(series_1), avg(series_2) + FROM hyper_1_tz GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +--------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: 
(time_bucket('@ 1 min'::interval, _hyper_2_2_chunk."time")) + -> Result + -> Index Scan using _hyper_2_2_chunk_time_plain_tz on _hyper_2_2_chunk +(5 rows) + + :PREFIX SELECT time_bucket('1 minute', time::timestamp) t, avg(series_0), min(series_1), avg(series_2) + FROM hyper_1_tz GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (time_bucket('@ 1 min'::interval, (_hyper_2_2_chunk."time")::timestamp without time zone)) + -> Result + -> Index Scan using _hyper_2_2_chunk_time_plain_tz on _hyper_2_2_chunk +(5 rows) + + :PREFIX SELECT time_bucket(10, time) t, avg(series_0), min(series_1), avg(series_2) + FROM hyper_1_int GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (time_bucket(10, hyper_1_int."time")) + -> Result + -> Custom Scan (ChunkAppend) on hyper_1_int + Order: time_bucket(10, hyper_1_int."time") DESC + -> Index Scan using _hyper_3_5_chunk_time_plain_int on _hyper_3_5_chunk + -> Index Scan using _hyper_3_4_chunk_time_plain_int on _hyper_3_4_chunk + -> Index Scan using _hyper_3_3_chunk_time_plain_int on _hyper_3_3_chunk +(9 rows) + + :PREFIX SELECT time_bucket(10, time, 2) t, avg(series_0), min(series_1), avg(series_2) + FROM hyper_1_int GROUP BY t ORDER BY t DESC limit 2; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Limit + -> GroupAggregate + Group Key: (time_bucket(10, hyper_1_int."time", 2)) + -> Result + -> Custom Scan (ChunkAppend) on hyper_1_int + Order: time_bucket(10, hyper_1_int."time", 2) DESC + -> Index Scan using _hyper_3_5_chunk_time_plain_int on _hyper_3_5_chunk + -> Index Scan using _hyper_3_4_chunk_time_plain_int on _hyper_3_4_chunk + -> Index Scan using _hyper_3_3_chunk_time_plain_int on _hyper_3_3_chunk +(9 rows) + +ROLLBACK; +-- sort order optimization should not be applied to non-hypertables +:PREFIX +SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) +FROM plain_table +WHERE time < to_timestamp(900) +GROUP BY t +ORDER BY t DESC +LIMIT 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (date_trunc('minute'::text, "time")) DESC + -> HashAggregate + Group Key: date_trunc('minute'::text, "time") + -> Index Scan using time_plain_plain_table on plain_table + Index Cond: ("time" < 'Wed Dec 31 16:15:00 1969 PST'::timestamp with time zone) +(7 rows) + +--generate the results into two different files +\set ECHO errors +--- Unoptimized result ++++ Optimized result +@@ -1,6 +1,6 @@ + timescaledb.enable_optimizations + ---------------------------------- +- off ++ on + (1 row) + + time | series_0 | series_1 | series_2 + ?column? +---------- + Done +(1 row) + diff --git a/test/expected/rowsecurity-16.out b/test/expected/rowsecurity-16.out new file mode 100644 index 00000000000..e2ce06ccb2a --- /dev/null +++ b/test/expected/rowsecurity-16.out @@ -0,0 +1,5037 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+-- +-- Test of Row-level security feature +-- +-- Clean up in case a prior regression run failed +\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SET timescaledb.enable_constraint_exclusion TO off; +-- Suppress NOTICE messages when users/groups don't exist +SET client_min_messages TO 'warning'; +DROP USER IF EXISTS regress_rls_alice; +DROP USER IF EXISTS regress_rls_bob; +DROP USER IF EXISTS regress_rls_carol; +DROP USER IF EXISTS regress_rls_dave; +DROP USER IF EXISTS regress_rls_exempt_user; +DROP ROLE IF EXISTS regress_rls_group1; +DROP ROLE IF EXISTS regress_rls_group2; +DROP SCHEMA IF EXISTS regress_rls_schema CASCADE; +RESET client_min_messages; +-- initial setup +CREATE USER regress_rls_alice NOLOGIN; +CREATE USER regress_rls_bob NOLOGIN; +CREATE USER regress_rls_carol NOLOGIN; +CREATE USER regress_rls_dave NOLOGIN; +CREATE USER regress_rls_exempt_user BYPASSRLS NOLOGIN; +CREATE ROLE regress_rls_group1 NOLOGIN; +CREATE ROLE regress_rls_group2 NOLOGIN; +GRANT regress_rls_group1 TO regress_rls_bob; +GRANT regress_rls_group2 TO regress_rls_carol; +CREATE SCHEMA regress_rls_schema; +GRANT ALL ON SCHEMA regress_rls_schema to public; +SET search_path = regress_rls_schema; +-- setup of malicious function +CREATE OR REPLACE FUNCTION f_leak(text) RETURNS bool + COST 0.0000001 LANGUAGE plpgsql + AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END'; +GRANT EXECUTE ON FUNCTION f_leak(text) TO public; +-- BASIC Row-Level Security Scenario +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE uaccount ( + pguser name primary key, + seclv int +); +GRANT SELECT ON uaccount TO public; +INSERT INTO uaccount VALUES + ('regress_rls_alice', 99), + ('regress_rls_bob', 1), + ('regress_rls_carol', 2), + ('regress_rls_dave', 3); +CREATE TABLE category ( + cid int primary key, + cname text +); +GRANT ALL ON category TO public; +INSERT INTO category VALUES + (11, 'novel'), + (22, 'science fiction'), + (33, 'technology'), + (44, 'manga'); +CREATE TABLE document ( + did int primary key, + cid int references category(cid), + dlevel int not null, + dauthor name, + dtitle text +); +GRANT ALL ON document TO public; +SELECT public.create_hypertable('document', 'did', chunk_time_interval=>2); + create_hypertable +----------------------------------- + (1,regress_rls_schema,document,t) +(1 row) + +INSERT INTO document VALUES + ( 1, 11, 1, 'regress_rls_bob', 'my first novel'), + ( 2, 11, 2, 'regress_rls_bob', 'my second novel'), + ( 3, 22, 2, 'regress_rls_bob', 'my science fiction'), + ( 4, 44, 1, 'regress_rls_bob', 'my first manga'), + ( 5, 44, 2, 'regress_rls_bob', 'my second manga'), + ( 6, 22, 1, 'regress_rls_carol', 'great science fiction'), + ( 7, 33, 2, 'regress_rls_carol', 'great technology book'), + ( 8, 44, 1, 'regress_rls_carol', 'great manga'), + ( 9, 22, 1, 'regress_rls_dave', 'awesome science fiction'), + (10, 33, 2, 'regress_rls_dave', 'awesome technology book'); +ALTER TABLE document ENABLE ROW LEVEL SECURITY; +-- user's security level must be higher than or equal to document's +CREATE POLICY p1 ON document AS PERMISSIVE + USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user)); +-- try to create a policy of bogus type +CREATE POLICY p1 ON document AS UGLY + USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user)); +ERROR: unrecognized row security option "ugly" +LINE 1: CREATE POLICY p1 ON document AS UGLY + ^ +HINT: Only PERMISSIVE or RESTRICTIVE policies are supported currently. 
+-- but Dave isn't allowed to anything at cid 50 or above +-- this is to make sure that we sort the policies by name first +-- when applying WITH CHECK, a later INSERT by Dave should fail due +-- to p1r first +CREATE POLICY p2r ON document AS RESTRICTIVE TO regress_rls_dave + USING (cid <> 44 AND cid < 50); +-- and Dave isn't allowed to see manga documents +CREATE POLICY p1r ON document AS RESTRICTIVE TO regress_rls_dave + USING (cid <> 44); +\dp + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------------------+----------+-------+---------------------------------------------+-------------------+-------------------------------------------- + regress_rls_schema | category | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | + | | | =arwdDxt/regress_rls_alice | | + regress_rls_schema | document | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | p1: + + | | | =arwdDxt/regress_rls_alice | | (u): (dlevel <= ( SELECT uaccount.seclv + + | | | | | FROM uaccount + + | | | | | WHERE (uaccount.pguser = CURRENT_USER)))+ + | | | | | p2r (RESTRICTIVE): + + | | | | | (u): ((cid <> 44) AND (cid < 50)) + + | | | | | to: regress_rls_dave + + | | | | | p1r (RESTRICTIVE): + + | | | | | (u): (cid <> 44) + + | | | | | to: regress_rls_dave + regress_rls_schema | uaccount | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | + | | | =r/regress_rls_alice | | +(3 rows) + +\d document + Table "regress_rls_schema.document" + Column | Type | Collation | Nullable | Default +---------+---------+-----------+----------+--------- + did | integer | | not null | + cid | integer | | | + dlevel | integer | | not null | + dauthor | name | | | + dtitle | text | | | +Indexes: + "document_pkey" PRIMARY KEY, btree (did) +Foreign-key constraints: + "document_cid_fkey" FOREIGN KEY (cid) REFERENCES category(cid) +Policies: + POLICY "p1" + USING ((dlevel <= ( SELECT uaccount.seclv + FROM uaccount + WHERE (uaccount.pguser = CURRENT_USER)))) + POLICY "p1r" AS RESTRICTIVE + TO regress_rls_dave + USING ((cid <> 44)) + POLICY "p2r" AS RESTRICTIVE + TO regress_rls_dave + USING (((cid <> 44) AND (cid < 50))) +Triggers: + ts_insert_blocker BEFORE INSERT ON document FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Number of child tables: 6 (Use \d+ to list them.) 
+ +SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename = 'document' ORDER BY policyname; + schemaname | tablename | policyname | permissive | roles | cmd | qual | with_check +--------------------+-----------+------------+-------------+--------------------+-----+--------------------------------------------+------------ + regress_rls_schema | document | p1 | PERMISSIVE | {public} | ALL | (dlevel <= ( SELECT uaccount.seclv +| + | | | | | | FROM uaccount +| + | | | | | | WHERE (uaccount.pguser = CURRENT_USER))) | + regress_rls_schema | document | p1r | RESTRICTIVE | {regress_rls_dave} | ALL | (cid <> 44) | + regress_rls_schema | document | p2r | RESTRICTIVE | {regress_rls_dave} | ALL | ((cid <> 44) AND (cid < 50)) | +(3 rows) + +-- viewpoint from regress_rls_bob +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my first manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 4 | 44 | 1 | regress_rls_bob | my first manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction +(5 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my first manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-------------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 44 | 4 | 1 | regress_rls_bob | my first manga | manga + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 44 | 8 | 1 | regress_rls_carol | great manga | manga + 22 | 9 | 1 | regress_rls_dave | awesome science fiction | science fiction +(5 rows) + +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+---------+-------- +(0 rows) + +-- viewpoint from regress_rls_carol +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | 
regress_rls_dave | awesome technology book +(10 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-------------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 11 | 2 | 2 | regress_rls_bob | my second novel | novel + 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction + 44 | 4 | 1 | regress_rls_bob | my first manga | manga + 44 | 5 | 2 | regress_rls_bob | my second manga | manga + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 33 | 7 | 2 | regress_rls_carol | great technology book | technology + 44 | 8 | 1 | regress_rls_carol | great manga | manga + 22 | 9 | 1 | regress_rls_dave | awesome science fiction | science fiction + 33 | 10 | 2 | regress_rls_dave | awesome technology book | technology +(10 rows) + +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+---------+-------- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); + QUERY PLAN +----------------------------------------------------- + Custom Scan (ChunkAppend) on document + Chunks excluded during startup: 0 + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on document document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_1_chunk document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_2_chunk document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_3_chunk document_4 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_4_chunk document_5 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_5_chunk document_6 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_6_chunk document_7 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(19 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); + QUERY PLAN +----------------------------------------------------------- + Hash Join + Hash Cond: (document.cid = category.cid) + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Custom Scan (ChunkAppend) on document + Chunks excluded during startup: 0 + -> Seq Scan on document document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_1_chunk document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_2_chunk document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_3_chunk document_4 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_4_chunk document_5 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_5_chunk document_6 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_6_chunk document_7 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Hash + -> Seq Scan on 
category +(23 rows) + +-- viewpoint from regress_rls_dave +SET SESSION AUTHORIZATION regress_rls_dave; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book +(7 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-------------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 11 | 2 | 2 | regress_rls_bob | my second novel | novel + 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 33 | 7 | 2 | regress_rls_carol | great technology book | technology + 22 | 9 | 1 | regress_rls_dave | awesome science fiction | science fiction + 33 | 10 | 2 | regress_rls_dave | awesome technology book | technology +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on document + Chunks excluded during startup: 0 + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on document document_1 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_1_chunk document_2 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_2_chunk document_3 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_3_chunk document_4 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_4_chunk document_5 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_5_chunk document_6 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_6_chunk document_7 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) +(19 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Hash Join + Hash Cond: (category.cid = document.cid) + InitPlan 1 (returns $0) + 
-> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on category + -> Hash + -> Custom Scan (ChunkAppend) on document + Chunks excluded during startup: 0 + -> Seq Scan on document document_1 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_1_chunk document_2 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_2_chunk document_3 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_3_chunk document_4 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_4_chunk document_5 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_5_chunk document_6 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_6_chunk document_7 + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) +(23 rows) + +-- 44 would technically fail for both p2r and p1r, but we should get an error +-- back from p1r for this because it sorts first +INSERT INTO document VALUES (100, 44, 1, 'regress_rls_dave', 'testing sorting of policies'); -- fail +ERROR: new row violates row-level security policy "p1r" for table "document" +-- Just to see a p2r error +INSERT INTO document VALUES (100, 55, 1, 'regress_rls_dave', 'testing sorting of policies'); -- fail +ERROR: new row violates row-level security policy "p2r" for table "document" +-- only owner can change policies +ALTER POLICY p1 ON document USING (true); --fail +ERROR: must be owner of table document +DROP POLICY p1 ON document; --fail +ERROR: must be owner of relation document +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY p1 ON document USING (dauthor = current_user); +-- viewpoint from regress_rls_bob again +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+-------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga +(5 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-----------------+--------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 11 | 2 | 2 | regress_rls_bob | my second novel | novel + 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction + 44 | 4 | 1 | regress_rls_bob | my first manga | manga + 44 | 5 | 2 | regress_rls_bob | my second manga | manga +(5 rows) + +-- viewpoint from rls_regres_carol again +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => great science 
fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------- + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga +(3 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did; +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-----------------------+----------------- + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 33 | 7 | 2 | regress_rls_carol | great technology book | technology + 44 | 8 | 1 | regress_rls_carol | great manga | manga +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); + QUERY PLAN +--------------------------------------------------------------- + Custom Scan (ChunkAppend) on document + Chunks excluded during startup: 0 + -> Seq Scan on document document_1 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_1_chunk document_2 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_2_chunk document_3 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_3_chunk document_4 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_4_chunk document_5 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_5_chunk document_6 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_6_chunk document_7 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) +(16 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop + -> Custom Scan (ChunkAppend) on document + Chunks excluded during startup: 0 + -> Seq Scan on document document_1 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_1_chunk document_2 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_2_chunk document_3 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_3_chunk document_4 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_4_chunk document_5 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_5_chunk document_6 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_1_6_chunk document_7 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Index Scan using category_pkey on category + Index Cond: (cid = document.cid) +(19 rows) + +-- interaction of FK/PK constraints +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE POLICY p2 ON category + USING (CASE WHEN current_user = 'regress_rls_bob' THEN cid IN (11, 33) + WHEN current_user = 'regress_rls_carol' THEN cid IN (22, 44) + ELSE false END); +ALTER TABLE category ENABLE ROW LEVEL SECURITY; +-- cannot delete PK referenced by invisible FK +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d.did, c.cid; + did | cid | dlevel | dauthor | dtitle | cid | cname +-----+-----+--------+-----------------+--------------------+-----+------------ + 1 | 11 | 1 | 
regress_rls_bob | my first novel | 11 | novel + 2 | 11 | 2 | regress_rls_bob | my second novel | 11 | novel + 3 | 22 | 2 | regress_rls_bob | my science fiction | | + 4 | 44 | 1 | regress_rls_bob | my first manga | | + 5 | 44 | 2 | regress_rls_bob | my second manga | | + | | | | | 33 | technology +(6 rows) + +DELETE FROM category WHERE cid = 33; -- fails with FK violation +ERROR: update or delete on table "category" violates foreign key constraint "4_7_document_cid_fkey" on table "_hyper_1_4_chunk" +DETAIL: Key is still referenced from table "_hyper_1_4_chunk". +-- can insert FK referencing invisible PK +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d.did, c.cid; + did | cid | dlevel | dauthor | dtitle | cid | cname +-----+-----+--------+-------------------+-----------------------+-----+----------------- + 6 | 22 | 1 | regress_rls_carol | great science fiction | 22 | science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book | | + 8 | 44 | 1 | regress_rls_carol | great manga | 44 | manga +(3 rows) + +INSERT INTO document VALUES (11, 33, 1, current_user, 'hoge'); +-- UNIQUE or PRIMARY KEY constraint violation DOES reveal presence of row +SET SESSION AUTHORIZATION regress_rls_bob; +INSERT INTO document VALUES (8, 44, 1, 'regress_rls_bob', 'my third manga'); -- Must fail with unique violation, revealing presence of did we can't see +ERROR: duplicate key value violates unique constraint "5_10_document_pkey" +DETAIL: Key (did)=(8) already exists. +SELECT * FROM document WHERE did = 8; -- and confirm we can't see it + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+---------+-------- +(0 rows) + +-- RLS policies are checked before constraints +INSERT INTO document VALUES (8, 44, 1, 'regress_rls_carol', 'my third manga'); -- Should fail with RLS check violation, not duplicate key violation +ERROR: new row violates row-level security policy for table "document" +UPDATE document SET did = 8, dauthor = 'regress_rls_carol' WHERE did = 5; -- Should fail with RLS check violation, not duplicate key violation +ERROR: new row violates row-level security policy for table "document" +-- database superuser does bypass RLS policy when enabled +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- database superuser does bypass RLS policy when disabled +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my 
science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- database non-superuser with bypass privilege can bypass RLS policy when disabled +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- RLS policy does not apply to table owner when RLS enabled. +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- RLS policy does not apply to table owner when RLS disabled. 
+SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO OFF; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- +-- Table inheritance and RLS policy +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +CREATE TABLE t1 (a int, junk1 text, b text); +ALTER TABLE t1 DROP COLUMN junk1; -- just a disturbing factor +GRANT ALL ON t1 TO public; +COPY t1 FROM stdin; +CREATE TABLE t2 (c float) INHERITS (t1); +GRANT ALL ON t2 TO public; +COPY t2 FROM stdin; +CREATE TABLE t3 (c text, b text, a int); +ALTER TABLE t3 INHERIT t1; +GRANT ALL ON t3 TO public; +COPY t3(a,b,c) FROM stdin; +CREATE POLICY p1 ON t1 FOR ALL TO PUBLIC USING (a % 2 = 0); -- be even number +CREATE POLICY p2 ON t2 FOR ALL TO PUBLIC USING (a % 2 = 1); -- be odd number +ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE t2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM t1; + a | b +---+----- + 2 | bbb + 4 | dad + 2 | bcd + 4 | def + 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(7 rows) + +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => yyy + a | b +---+----- + 2 | bbb + 4 | dad + 2 | bcd + 4 | def + 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(7 rows) + +-- reference to system column +SELECT ctid, * FROM t1; + ctid | a | b +-------+---+----- + (0,2) | 2 | bbb + (0,4) | 4 | dad + (0,2) | 2 | bcd + (0,4) | 4 | def + (0,2) | 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT *, t1 FROM t1; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(7 rows) + +-- reference to whole-row reference +SELECT *, t1 FROM t1; + a | b | t1 +---+-----+--------- + 2 | bbb | (2,bbb) + 4 | dad | (4,dad) + 2 | bcd | (2,bcd) + 4 | def | (4,def) + 2 | yyy | (2,yyy) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT *, t1 FROM t1; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(7 rows) + +-- for share/update 
lock +SELECT * FROM t1 FOR SHARE; + a | b +---+----- + 2 | bbb + 4 | dad + 2 | bcd + 4 | def + 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 FOR SHARE; + QUERY PLAN +------------------------------------- + LockRows + -> Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(8 rows) + +SELECT * FROM t1 WHERE f_leak(b) FOR SHARE; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => yyy + a | b +---+----- + 2 | bbb + 4 | dad + 2 | bcd + 4 | def + 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b) FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(8 rows) + +-- union all query +SELECT a, b, ctid FROM t2 UNION ALL SELECT a, b, ctid FROM t3; + a | b | ctid +---+-----+------- + 1 | abc | (0,1) + 3 | cde | (0,3) + 1 | xxx | (0,1) + 2 | yyy | (0,2) + 3 | zzz | (0,3) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT a, b, ctid FROM t2 UNION ALL SELECT a, b, ctid FROM t3; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t2 + Filter: ((a % 2) = 1) + -> Seq Scan on t3 +(4 rows) + +-- superuser is allowed to bypass RLS checks +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => cde +NOTICE: f_leak => def +NOTICE: f_leak => xxx +NOTICE: f_leak => yyy +NOTICE: f_leak => zzz + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad + 1 | abc + 2 | bcd + 3 | cde + 4 | def + 1 | xxx + 2 | yyy + 3 | zzz +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: f_leak(b) + -> Seq Scan on t2 t1_2 + Filter: f_leak(b) + -> Seq Scan on t3 t1_3 + Filter: f_leak(b) +(7 rows) + +-- non-superuser with bypass privilege can bypass RLS policy when disabled +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => cde +NOTICE: f_leak => def +NOTICE: f_leak => xxx +NOTICE: f_leak => yyy +NOTICE: f_leak => zzz + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad + 1 | abc + 2 | bcd + 3 | cde + 4 | def + 1 | xxx + 2 | yyy + 3 | zzz +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: f_leak(b) + -> Seq Scan on t2 t1_2 + Filter: f_leak(b) + -> Seq Scan on t3 t1_3 + Filter: f_leak(b) +(7 rows) + +-- +-- Hyper Tables +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE hyper_document ( + did int, + cid int, + dlevel int not null, + dauthor name, + dtitle text +); +GRANT ALL ON hyper_document TO public; +SELECT public.create_hypertable('hyper_document', 'did', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "did" +DETAIL: Dimensions cannot have NULL values. 
+ create_hypertable +----------------------------------------- + (2,regress_rls_schema,hyper_document,t) +(1 row) + +INSERT INTO hyper_document VALUES + ( 1, 11, 1, 'regress_rls_bob', 'my first novel'), + ( 2, 11, 2, 'regress_rls_bob', 'my second novel'), + ( 3, 99, 2, 'regress_rls_bob', 'my science textbook'), + ( 4, 55, 1, 'regress_rls_bob', 'my first satire'), + ( 5, 99, 2, 'regress_rls_bob', 'my history book'), + ( 6, 11, 1, 'regress_rls_carol', 'great science fiction'), + ( 7, 99, 2, 'regress_rls_carol', 'great technology book'), + ( 8, 55, 2, 'regress_rls_carol', 'great satire'), + ( 9, 11, 1, 'regress_rls_dave', 'awesome science fiction'), + (10, 99, 2, 'regress_rls_dave', 'awesome technology book'); +ALTER TABLE hyper_document ENABLE ROW LEVEL SECURITY; +GRANT ALL ON _timescaledb_internal._hyper_2_9_chunk TO public; +-- Create policy on parent +-- user's security level must be higher than or equal to document's +CREATE POLICY pp1 ON hyper_document AS PERMISSIVE + USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user)); +-- Dave is only allowed to see cid < 55 +CREATE POLICY pp1r ON hyper_document AS RESTRICTIVE TO regress_rls_dave + USING (cid < 55); +\d+ hyper_document + Table "regress_rls_schema.hyper_document" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +---------+---------+-----------+----------+---------+----------+--------------+------------- + did | integer | | not null | | plain | | + cid | integer | | | | plain | | + dlevel | integer | | not null | | plain | | + dauthor | name | | | | plain | | + dtitle | text | | | | extended | | +Indexes: + "hyper_document_did_idx" btree (did DESC) +Policies: + POLICY "pp1" + USING ((dlevel <= ( SELECT uaccount.seclv + FROM uaccount + WHERE (uaccount.pguser = CURRENT_USER)))) + POLICY "pp1r" AS RESTRICTIVE + TO regress_rls_dave + USING ((cid < 55)) +Triggers: + ts_insert_blocker BEFORE INSERT ON hyper_document FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_2_10_chunk, + _timescaledb_internal._hyper_2_11_chunk, + _timescaledb_internal._hyper_2_12_chunk, + _timescaledb_internal._hyper_2_13_chunk, + _timescaledb_internal._hyper_2_14_chunk, + _timescaledb_internal._hyper_2_9_chunk + +SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename like '%hyper_document%' ORDER BY policyname; + schemaname | tablename | policyname | permissive | roles | cmd | qual | with_check +--------------------+----------------+------------+-------------+--------------------+-----+--------------------------------------------+------------ + regress_rls_schema | hyper_document | pp1 | PERMISSIVE | {public} | ALL | (dlevel <= ( SELECT uaccount.seclv +| + | | | | | | FROM uaccount +| + | | | | | | WHERE (uaccount.pguser = CURRENT_USER))) | + regress_rls_schema | hyper_document | pp1r | RESTRICTIVE | {regress_rls_dave} | ALL | (cid < 55) | +(2 rows) + +-- viewpoint from regress_rls_bob +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my first satire +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 4 | 55 | 1 | regress_rls_bob | my first satire + 6 | 11 | 1 | regress_rls_carol | great science 
fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); + QUERY PLAN +------------------------------------------------------ + Custom Scan (ChunkAppend) on hyper_document + Chunks excluded during startup: 0 + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on hyper_document hyper_document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_9_chunk hyper_document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_10_chunk hyper_document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_11_chunk hyper_document_4 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_12_chunk hyper_document_5 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_13_chunk hyper_document_6 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_14_chunk hyper_document_7 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(19 rows) + +-- viewpoint from regress_rls_carol +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science textbook +NOTICE: f_leak => my first satire +NOTICE: f_leak => my history book +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great satire +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); + QUERY PLAN +------------------------------------------------------ + Custom Scan (ChunkAppend) on hyper_document + Chunks excluded during startup: 0 + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on hyper_document hyper_document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_9_chunk hyper_document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_10_chunk hyper_document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_11_chunk hyper_document_4 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_12_chunk hyper_document_5 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_13_chunk hyper_document_6 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_14_chunk hyper_document_7 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(19 rows) + +-- viewpoint from regress_rls_dave +SET SESSION AUTHORIZATION regress_rls_dave; +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great 
science fiction +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 6 | 11 | 1 | regress_rls_carol | great science fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); + QUERY PLAN +-------------------------------------------------------------------- + Custom Scan (ChunkAppend) on hyper_document + Chunks excluded during startup: 0 + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on hyper_document hyper_document_1 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_9_chunk hyper_document_2 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_10_chunk hyper_document_3 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_11_chunk hyper_document_4 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_12_chunk hyper_document_5 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_13_chunk hyper_document_6 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_14_chunk hyper_document_7 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) +(19 rows) + +-- pp1 ERROR +INSERT INTO hyper_document VALUES (1, 11, 5, 'regress_rls_dave', 'testing pp1'); -- fail +ERROR: new row violates row-level security policy for table "hyper_document" +-- pp1r ERROR +INSERT INTO hyper_document VALUES (1, 99, 1, 'regress_rls_dave', 'testing pp1r'); -- fail +ERROR: new row violates row-level security policy "pp1r" for table "hyper_document" +-- Show that RLS policy does not apply for direct inserts to children +-- This should fail with RLS POLICY pp1r violation. +INSERT INTO hyper_document VALUES (1, 55, 1, 'regress_rls_dave', 'testing RLS with hypertables'); -- fail +ERROR: new row violates row-level security policy "pp1r" for table "hyper_document" +-- But this should succeed. 
+INSERT INTO _timescaledb_internal._hyper_2_9_chunk VALUES (1, 55, 1, 'regress_rls_dave', 'testing RLS with hypertables'); -- success +-- We still cannot see the row using the parent +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did, cid; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 6 | 11 | 1 | regress_rls_carol | great science fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +-- But we can if we look directly +SELECT * FROM _timescaledb_internal._hyper_2_9_chunk WHERE f_leak(dtitle) ORDER BY did, cid; +NOTICE: f_leak => my first novel +NOTICE: f_leak => testing RLS with hypertables + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables +(2 rows) + +-- Turn on RLS and create policy on child to show RLS is checked before constraints +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER TABLE _timescaledb_internal._hyper_2_9_chunk ENABLE ROW LEVEL SECURITY; +CREATE POLICY pp3 ON _timescaledb_internal._hyper_2_9_chunk AS RESTRICTIVE + USING (cid < 55); +-- This should fail with RLS violation now. +SET SESSION AUTHORIZATION regress_rls_dave; +INSERT INTO _timescaledb_internal._hyper_2_9_chunk VALUES (1, 55, 1, 'regress_rls_dave', 'testing RLS with hypertables - round 2'); -- fail +ERROR: new row violates row-level security policy for table "_hyper_2_9_chunk" +-- And now we cannot see directly into the partition either, due to RLS +SELECT * FROM _timescaledb_internal._hyper_2_9_chunk WHERE f_leak(dtitle) ORDER BY did, cid; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+---------+-------- +(0 rows) + +-- The parent looks same as before +-- viewpoint from regress_rls_dave +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did, cid; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 6 | 11 | 1 | regress_rls_carol | great science fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); + QUERY PLAN +-------------------------------------------------------------------- + Custom Scan (ChunkAppend) on hyper_document + Chunks excluded during startup: 0 + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on hyper_document hyper_document_1 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_9_chunk hyper_document_2 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_10_chunk hyper_document_3 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_11_chunk hyper_document_4 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_12_chunk hyper_document_5 + Filter: ((cid < 
55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_13_chunk hyper_document_6 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_14_chunk hyper_document_7 + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) +(19 rows) + +-- viewpoint from regress_rls_carol +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did, cid; +NOTICE: f_leak => my first novel +NOTICE: f_leak => testing RLS with hypertables +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science textbook +NOTICE: f_leak => my first satire +NOTICE: f_leak => my history book +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great satire +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); + QUERY PLAN +------------------------------------------------------ + Custom Scan (ChunkAppend) on hyper_document + Chunks excluded during startup: 0 + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on hyper_document hyper_document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_9_chunk hyper_document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_10_chunk hyper_document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_11_chunk hyper_document_4 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_12_chunk hyper_document_5 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_13_chunk hyper_document_6 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_14_chunk hyper_document_7 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(19 rows) + +-- only owner can change policies +ALTER POLICY pp1 ON hyper_document USING (true); --fail +ERROR: must be owner of table hyper_document +DROP POLICY pp1 ON hyper_document; --fail +ERROR: must be owner of relation hyper_document +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY pp1 ON hyper_document USING (dauthor = current_user); +-- viewpoint from regress_rls_bob again +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did, cid; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science textbook +NOTICE: f_leak => my first satire +NOTICE: f_leak => my history book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+--------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | 
regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book +(5 rows) + +-- viewpoint from regress_rls_carol again +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM hyper_document WHERE f_leak(dtitle) ORDER BY did, cid; +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great satire + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------- + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); + QUERY PLAN +--------------------------------------------------------------- + Custom Scan (ChunkAppend) on hyper_document + Chunks excluded during startup: 0 + -> Seq Scan on hyper_document hyper_document_1 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_9_chunk hyper_document_2 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_10_chunk hyper_document_3 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_11_chunk hyper_document_4 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_12_chunk hyper_document_5 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_13_chunk hyper_document_6 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on _hyper_2_14_chunk hyper_document_7 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) +(16 rows) + +-- database superuser does bypass RLS policy when enabled +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +SELECT * FROM hyper_document ORDER BY did, cid; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book +(11 rows) + +SELECT * FROM _timescaledb_internal._hyper_2_9_chunk ORDER BY did, cid; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables +(2 rows) + +-- database non-superuser with bypass privilege can bypass RLS policy when disabled +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +SELECT * FROM hyper_document ORDER BY did, cid; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | 
regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book +(11 rows) + +SELECT * FROM _timescaledb_internal._hyper_2_9_chunk ORDER BY did, cid; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables +(2 rows) + +-- RLS policy does not apply to table owner when RLS enabled. +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +SELECT * FROM hyper_document ORDER BY did, cid; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book +(11 rows) + +SELECT * FROM _timescaledb_internal._hyper_2_9_chunk ORDER BY did, cid; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+------------------+------------------------------ + 1 | 11 | 1 | regress_rls_bob | my first novel + 1 | 55 | 1 | regress_rls_dave | testing RLS with hypertables +(2 rows) + +-- When RLS disabled, other users get ERROR. +SET SESSION AUTHORIZATION regress_rls_dave; +SET row_security TO OFF; +SELECT * FROM hyper_document ORDER BY did, cid; +ERROR: query would be affected by row-level security policy for table "hyper_document" +SELECT * FROM _timescaledb_internal._hyper_2_9_chunk ORDER BY did, cid; +ERROR: query would be affected by row-level security policy for table "_hyper_2_9_chunk" +-- Check behavior with a policy that uses a SubPlan not an InitPlan. +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +CREATE POLICY pp3 ON hyper_document AS RESTRICTIVE + USING ((SELECT dlevel <= seclv FROM uaccount WHERE pguser = current_user)); +SET SESSION AUTHORIZATION regress_rls_carol; +INSERT INTO hyper_document VALUES (100, 11, 5, 'regress_rls_carol', 'testing pp3'); -- fail +ERROR: new row violates row-level security policy "pp3" for table "hyper_document" +----- Dependencies ----- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +CREATE TABLE dependee (x integer, y integer); +SELECT public.create_hypertable('dependee', 'x', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "x" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------------- + (3,regress_rls_schema,dependee,t) +(1 row) + +CREATE TABLE dependent (x integer, y integer); +SELECT public.create_hypertable('dependent', 'x', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "x" +DETAIL: Dimensions cannot have NULL values. 
+ create_hypertable +------------------------------------ + (4,regress_rls_schema,dependent,t) +(1 row) + +CREATE POLICY d1 ON dependent FOR ALL + TO PUBLIC + USING (x = (SELECT d.x FROM dependee d WHERE d.y = y)); +DROP TABLE dependee; -- Should fail without CASCADE due to dependency on row security qual? +ERROR: cannot drop table dependee because other objects depend on it +DETAIL: policy d1 on table dependent depends on table dependee +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE dependee CASCADE; +NOTICE: drop cascades to policy d1 on table dependent +EXPLAIN (COSTS OFF) SELECT * FROM dependent; -- After drop, should be unqualified + QUERY PLAN +----------------------- + Seq Scan on dependent +(1 row) + +----- RECURSION ---- +-- +-- Simple recursion +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE rec1 (x integer, y integer); +SELECT public.create_hypertable('rec1', 'x', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "x" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------- + (5,regress_rls_schema,rec1,t) +(1 row) + +CREATE POLICY r1 ON rec1 USING (x = (SELECT r.x FROM rec1 r WHERE y = r.y)); +ALTER TABLE rec1 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, direct recursion +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- Mutual recursion +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE rec2 (a integer, b integer); +SELECT public.create_hypertable('rec2', 'x', chunk_time_interval=>2); +ERROR: column "x" does not exist +ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2 WHERE b = y)); +CREATE POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1 WHERE y = b)); +ALTER TABLE rec2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, mutual recursion +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- Mutual recursion via views +-- +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW rec1v AS SELECT * FROM rec1; +CREATE VIEW rec2v AS SELECT * FROM rec2; +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y)); +ALTER POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b)); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, mutual recursion via views +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- Mutual recursion via .s.b views +-- +SET SESSION AUTHORIZATION regress_rls_bob; +\set VERBOSITY terse \\ -- suppress cascade details +DROP VIEW rec1v, rec2v CASCADE; +NOTICE: drop cascades to 2 other objects +\set VERBOSITY default +CREATE VIEW rec1v WITH (security_barrier) AS SELECT * FROM rec1; +CREATE VIEW rec2v WITH (security_barrier) AS SELECT * FROM rec2; +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y)); +CREATE POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b)); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, mutual recursion via s.b. views +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- recursive RLS and VIEWs in policy +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE s1 (a int, b text); +SELECT public.create_hypertable('s1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ create_hypertable +----------------------------- + (6,regress_rls_schema,s1,t) +(1 row) + +INSERT INTO s1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x); +CREATE TABLE s2 (x int, y text); +SELECT public.create_hypertable('s2', 'x', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "x" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------- + (7,regress_rls_schema,s2,t) +(1 row) + +INSERT INTO s2 (SELECT x, md5(x::text) FROM generate_series(-6,6) x); +GRANT SELECT ON s1, s2 TO regress_rls_bob; +CREATE POLICY p1 ON s1 USING (a in (select x from s2 where y like '%2f%')); +CREATE POLICY p2 ON s2 USING (x in (select a from s1 where b like '%22%')); +CREATE POLICY p3 ON s1 FOR INSERT WITH CHECK (a = (SELECT a FROM s1)); +ALTER TABLE s1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE s2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW v2 AS SELECT * FROM s2 WHERE y like '%af%'; +SELECT * FROM s1 WHERE f_leak(b); -- fail (infinite recursion) +ERROR: infinite recursion detected in policy for relation "s1" +INSERT INTO s1 VALUES (1, 'foo'); -- fail (infinite recursion) +ERROR: infinite recursion detected in policy for relation "s1" +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p3 on s1; +ALTER POLICY p2 ON s2 USING (x % 2 = 0); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM s1 WHERE f_leak(b); -- OK +NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c +NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c + a | b +---+---------------------------------- + 2 | c81e728d9d4c2f636f067f89cc14862c + 4 | a87ff679a2f3e71d9181a67b7542122c +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM only s1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------------- + Seq Scan on s1 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + SubPlan 1 + -> Append + -> Seq Scan on s2 s2_1 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) + -> Seq Scan on _hyper_7_27_chunk s2_2 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) + -> Seq Scan on _hyper_7_28_chunk s2_3 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) + -> Seq Scan on _hyper_7_29_chunk s2_4 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) + -> Seq Scan on _hyper_7_30_chunk s2_5 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) + -> Seq Scan on _hyper_7_31_chunk s2_6 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) + -> Seq Scan on _hyper_7_32_chunk s2_7 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) + -> Seq Scan on _hyper_7_33_chunk s2_8 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) +(20 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY p1 ON s1 USING (a in (select x from v2)); -- using VIEW in RLS policy +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM s1 WHERE f_leak(b); -- OK +NOTICE: f_leak => 0267aaf632e87a63288a08331f22c7c3 +NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc + a | b +----+---------------------------------- + -4 | 0267aaf632e87a63288a08331f22c7c3 + 6 | 1679091c5a880faf6fb5e6087eb1b2dc +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM s1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------------------- + Custom Scan (ChunkAppend) on s1 + Chunks excluded during startup: 0 + -> Seq Scan on s1 s1_1 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + SubPlan 1 + -> Append + -> Seq Scan on s2 s2_1 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_27_chunk s2_2 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> 
Seq Scan on _hyper_7_28_chunk s2_3 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_29_chunk s2_4 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_30_chunk s2_5 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_31_chunk s2_6 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_32_chunk s2_7 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_33_chunk s2_8 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_6_16_chunk s1_2 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_17_chunk s1_3 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_18_chunk s1_4 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_19_chunk s1_5 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_20_chunk s1_6 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_21_chunk s1_7 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_22_chunk s1_8 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_23_chunk s1_9 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_24_chunk s1_10 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_25_chunk s1_11 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + -> Seq Scan on _hyper_6_26_chunk s1_12 + Filter: ((hashed SubPlan 1) AND f_leak(b)) +(44 rows) + +SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%'; + xx | x | y +----+----+---------------------------------- + -6 | -6 | 596a3d04481816330f07e4f97510c28f + -4 | -4 | 0267aaf632e87a63288a08331f22c7c3 + 2 | 2 | c81e728d9d4c2f636f067f89cc14862c +(3 rows) + +EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%'; + QUERY PLAN +------------------------------------------------------------------------------------------- + Result + -> Append + -> Seq Scan on s2 s2_1 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + -> Seq Scan on _hyper_7_27_chunk s2_2 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + -> Seq Scan on _hyper_7_28_chunk s2_3 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + -> Seq Scan on _hyper_7_29_chunk s2_4 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + -> Seq Scan on _hyper_7_30_chunk s2_5 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + -> Seq Scan on _hyper_7_31_chunk s2_6 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + -> Seq Scan on _hyper_7_32_chunk s2_7 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + -> Seq Scan on _hyper_7_33_chunk s2_8 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + SubPlan 2 + -> Limit + -> Result + -> Custom Scan (ChunkAppend) on s1 + -> Seq Scan on s1 s1_1 + Filter: (hashed SubPlan 1) + SubPlan 1 + -> Append + -> Seq Scan on s2 s2_10 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_27_chunk s2_11 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_28_chunk s2_12 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_29_chunk s2_13 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_30_chunk s2_14 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_31_chunk s2_15 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_32_chunk s2_16 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq Scan on _hyper_7_33_chunk s2_17 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) + -> Seq 
Scan on _hyper_6_16_chunk s1_2 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_17_chunk s1_3 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_18_chunk s1_4 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_19_chunk s1_5 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_20_chunk s1_6 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_21_chunk s1_7 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_22_chunk s1_8 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_23_chunk s1_9 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_24_chunk s1_10 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_25_chunk s1_11 + Filter: (hashed SubPlan 1) + -> Seq Scan on _hyper_6_26_chunk s1_12 + Filter: (hashed SubPlan 1) +(64 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY p2 ON s2 USING (x in (select a from s1 where b like '%d2%')); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM s1 WHERE f_leak(b); -- fail (infinite recursion via view) +ERROR: infinite recursion detected in policy for relation "s1" +-- prepared statement with regress_rls_alice privilege +PREPARE p1(int) AS SELECT * FROM t1 WHERE a <= $1; +EXECUTE p1(2); + a | b +---+----- + 2 | bbb + 2 | bcd + 2 | yyy +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE p1(2); + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a <= 2) AND ((a % 2) = 0)) + -> Seq Scan on t2 t1_2 + Filter: ((a <= 2) AND ((a % 2) = 0)) + -> Seq Scan on t3 t1_3 + Filter: ((a <= 2) AND ((a % 2) = 0)) +(7 rows) + +-- superuser is allowed to bypass RLS checks +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => cde +NOTICE: f_leak => def +NOTICE: f_leak => xxx +NOTICE: f_leak => yyy +NOTICE: f_leak => zzz + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad + 1 | abc + 2 | bcd + 3 | cde + 4 | def + 1 | xxx + 2 | yyy + 3 | zzz +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: f_leak(b) + -> Seq Scan on t2 t1_2 + Filter: f_leak(b) + -> Seq Scan on t3 t1_3 + Filter: f_leak(b) +(7 rows) + +-- plan cache should be invalidated +EXECUTE p1(2); + a | b +---+----- + 1 | aba + 2 | bbb + 1 | abc + 2 | bcd + 1 | xxx + 2 | yyy +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE p1(2); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: (a <= 2) + -> Seq Scan on t2 t1_2 + Filter: (a <= 2) + -> Seq Scan on t3 t1_3 + Filter: (a <= 2) +(7 rows) + +PREPARE p2(int) AS SELECT * FROM t1 WHERE a = $1; +EXECUTE p2(2); + a | b +---+----- + 2 | bbb + 2 | bcd + 2 | yyy +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE p2(2); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: (a = 2) + -> Seq Scan on t2 t1_2 + Filter: (a = 2) + -> Seq Scan on t3 t1_3 + Filter: (a = 2) +(7 rows) + +-- also, case when privilege switch from superuser +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +EXECUTE p2(2); + a | b +---+----- + 2 | bbb + 2 | bcd + 2 | yyy +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE p2(2); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a = 2) AND ((a % 2) = 0)) + -> Seq Scan on t2 t1_2 + Filter: ((a = 2) AND ((a % 2) = 0)) + -> Seq Scan on t3 t1_3 + Filter: ((a = 2) 
AND ((a % 2) = 0)) +(7 rows) + +-- +-- UPDATE / DELETE and Row-level security +-- +SET SESSION AUTHORIZATION regress_rls_bob; +EXPLAIN (COSTS OFF) UPDATE t1 SET b = b || b WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------- + Update on t1 + Update on t1 t1_1 + Update on t2 t1_2 + Update on t3 t1_3 + -> Result + -> Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(12 rows) + +UPDATE t1 SET b = b || b WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => yyy +EXPLAIN (COSTS OFF) UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Update on t1 + -> Seq Scan on t1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(3 rows) + +UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b); +NOTICE: f_leak => bbbbbb +NOTICE: f_leak => daddad +-- returning clause with system column +UPDATE only t1 SET b = b WHERE f_leak(b) RETURNING ctid, *, t1; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt + ctid | a | b | t1 +--------+---+-------------+----------------- + (0,9) | 2 | bbbbbb_updt | (2,bbbbbb_updt) + (0,10) | 4 | daddad_updt | (4,daddad_updt) +(2 rows) + +UPDATE t1 SET b = b WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => bcdbcd +NOTICE: f_leak => defdef +NOTICE: f_leak => yyyyyy + a | b +---+------------- + 2 | bbbbbb_updt + 4 | daddad_updt + 2 | bcdbcd + 4 | defdef + 2 | yyyyyy +(5 rows) + +UPDATE t1 SET b = b WHERE f_leak(b) RETURNING ctid, *, t1; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => bcdbcd +NOTICE: f_leak => defdef +NOTICE: f_leak => yyyyyy + ctid | a | b | t1 +--------+---+-------------+----------------- + (0,13) | 2 | bbbbbb_updt | (2,bbbbbb_updt) + (0,14) | 4 | daddad_updt | (4,daddad_updt) + (0,9) | 2 | bcdbcd | (2,bcdbcd) + (0,10) | 4 | defdef | (4,defdef) + (0,6) | 2 | yyyyyy | (2,yyyyyy) +(5 rows) + +-- updates with from clause +EXPLAIN (COSTS OFF) UPDATE t2 SET b=t2.b FROM t3 +WHERE t2.a = 3 and t3.a = 2 AND f_leak(t2.b) AND f_leak(t3.b); + QUERY PLAN +----------------------------------------------------------------- + Update on t2 + -> Nested Loop + -> Seq Scan on t2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on t3 + Filter: ((a = 2) AND f_leak(b)) +(6 rows) + +UPDATE t2 SET b=t2.b FROM t3 +WHERE t2.a = 3 and t3.a = 2 AND f_leak(t2.b) AND f_leak(t3.b); +NOTICE: f_leak => cde +NOTICE: f_leak => yyyyyy +EXPLAIN (COSTS OFF) UPDATE t1 SET b=t1.b FROM t2 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); + QUERY PLAN +----------------------------------------------------------------------- + Update on t1 + Update on t1 t1_1 + Update on t2 t1_2 + Update on t3 t1_3 + -> Nested Loop + -> Seq Scan on t2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Append + -> Seq Scan on t1 t1_1 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) +(14 rows) + +UPDATE t1 SET b=t1.b FROM t2 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); +NOTICE: f_leak => cde +EXPLAIN (COSTS OFF) UPDATE t2 SET b=t2.b FROM t1 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); + 
QUERY PLAN +----------------------------------------------------------------------- + Update on t2 + -> Nested Loop + -> Seq Scan on t2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Append + -> Seq Scan on t1 t1_1 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) +(11 rows) + +UPDATE t2 SET b=t2.b FROM t1 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); +NOTICE: f_leak => cde +-- updates with from clause self join +EXPLAIN (COSTS OFF) UPDATE t2 t2_1 SET b = t2_2.b FROM t2 t2_2 +WHERE t2_1.a = 3 AND t2_2.a = t2_1.a AND t2_2.b = t2_1.b +AND f_leak(t2_1.b) AND f_leak(t2_2.b) RETURNING *, t2_1, t2_2; + QUERY PLAN +----------------------------------------------------------------- + Update on t2 t2_1 + -> Nested Loop + Join Filter: (t2_1.b = t2_2.b) + -> Seq Scan on t2 t2_1 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on t2 t2_2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) +(7 rows) + +UPDATE t2 t2_1 SET b = t2_2.b FROM t2 t2_2 +WHERE t2_1.a = 3 AND t2_2.a = t2_1.a AND t2_2.b = t2_1.b +AND f_leak(t2_1.b) AND f_leak(t2_2.b) RETURNING *, t2_1, t2_2; +NOTICE: f_leak => cde +NOTICE: f_leak => cde + a | b | c | a | b | c | t2_1 | t2_2 +---+-----+-----+---+-----+-----+-------------+------------- + 3 | cde | 3.3 | 3 | cde | 3.3 | (3,cde,3.3) | (3,cde,3.3) +(1 row) + +EXPLAIN (COSTS OFF) UPDATE t1 t1_1 SET b = t1_2.b FROM t1 t1_2 +WHERE t1_1.a = 4 AND t1_2.a = t1_1.a AND t1_2.b = t1_1.b +AND f_leak(t1_1.b) AND f_leak(t1_2.b) RETURNING *, t1_1, t1_2; + QUERY PLAN +----------------------------------------------------------------------------- + Update on t1 t1_1 + Update on t1 t1_1_1 + Update on t2 t1_1_2 + Update on t3 t1_1_3 + -> Nested Loop + Join Filter: (t1_1.b = t1_2.b) + -> Append + -> Seq Scan on t1 t1_1_1 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_1_2 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_1_3 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Materialize + -> Append + -> Seq Scan on t1 t1_2_1 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2_2 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_2_3 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) +(21 rows) + +UPDATE t1 t1_1 SET b = t1_2.b FROM t1 t1_2 +WHERE t1_1.a = 4 AND t1_2.a = t1_1.a AND t1_2.b = t1_1.b +AND f_leak(t1_1.b) AND f_leak(t1_2.b) RETURNING *, t1_1, t1_2; +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => defdef +NOTICE: f_leak => defdef + a | b | a | b | t1_1 | t1_2 +---+-------------+---+-------------+-----------------+----------------- + 4 | daddad_updt | 4 | daddad_updt | (4,daddad_updt) | (4,daddad_updt) + 4 | defdef | 4 | defdef | (4,defdef) | (4,defdef) +(2 rows) + +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM t1 ORDER BY a,b; + a | b +---+------------- + 1 | aba + 1 | abc + 1 | xxx + 2 | bbbbbb_updt + 2 | bcdbcd + 2 | yyyyyy + 3 | ccc + 3 | cde + 3 | zzz + 4 | daddad_updt + 4 | defdef +(11 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +EXPLAIN (COSTS OFF) DELETE FROM only t1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Delete on t1 + -> Seq Scan on t1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(3 rows) + +EXPLAIN (COSTS OFF) DELETE FROM t1 WHERE f_leak(b); + QUERY PLAN 
+----------------------------------------------------- + Delete on t1 + Delete on t1 t1_1 + Delete on t2 t1_2 + Delete on t3 t1_3 + -> Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(11 rows) + +DELETE FROM only t1 WHERE f_leak(b) RETURNING ctid, *, t1; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt + ctid | a | b | t1 +--------+---+-------------+----------------- + (0,13) | 2 | bbbbbb_updt | (2,bbbbbb_updt) + (0,15) | 4 | daddad_updt | (4,daddad_updt) +(2 rows) + +DELETE FROM t1 WHERE f_leak(b) RETURNING ctid, *, t1; +NOTICE: f_leak => bcdbcd +NOTICE: f_leak => defdef +NOTICE: f_leak => yyyyyy + ctid | a | b | t1 +--------+---+--------+------------ + (0,9) | 2 | bcdbcd | (2,bcdbcd) + (0,13) | 4 | defdef | (4,defdef) + (0,6) | 2 | yyyyyy | (2,yyyyyy) +(3 rows) + +-- +-- S.b. view on top of Row-level security +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE b1 (a int, b text); +SELECT public.create_hypertable('b1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------- + (8,regress_rls_schema,b1,t) +(1 row) + +INSERT INTO b1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x); +CREATE POLICY p1 ON b1 USING (a % 2 = 0); +ALTER TABLE b1 ENABLE ROW LEVEL SECURITY; +GRANT ALL ON b1 TO regress_rls_bob; +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW bv1 WITH (security_barrier) AS SELECT * FROM b1 WHERE a > 0 WITH CHECK OPTION; +GRANT ALL ON bv1 TO regress_rls_carol; +SET SESSION AUTHORIZATION regress_rls_carol; +EXPLAIN (COSTS OFF) SELECT * FROM bv1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------------------------------- + Subquery Scan on bv1 + Filter: f_leak(bv1.b) + -> Append + -> Seq Scan on b1 b1_1 + Filter: ((a > 0) AND ((a % 2) = 0)) + -> Index Scan using _hyper_8_39_chunk_b1_a_idx on _hyper_8_39_chunk b1_2 + Index Cond: (a > 0) + Filter: ((a % 2) = 0) + -> Index Scan using _hyper_8_40_chunk_b1_a_idx on _hyper_8_40_chunk b1_3 + Index Cond: (a > 0) + Filter: ((a % 2) = 0) + -> Index Scan using _hyper_8_41_chunk_b1_a_idx on _hyper_8_41_chunk b1_4 + Index Cond: (a > 0) + Filter: ((a % 2) = 0) + -> Index Scan using _hyper_8_42_chunk_b1_a_idx on _hyper_8_42_chunk b1_5 + Index Cond: (a > 0) + Filter: ((a % 2) = 0) + -> Index Scan using _hyper_8_43_chunk_b1_a_idx on _hyper_8_43_chunk b1_6 + Index Cond: (a > 0) + Filter: ((a % 2) = 0) + -> Index Scan using _hyper_8_44_chunk_b1_a_idx on _hyper_8_44_chunk b1_7 + Index Cond: (a > 0) + Filter: ((a % 2) = 0) +(23 rows) + +SELECT * FROM bv1 WHERE f_leak(b); +NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c +NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c +NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc +NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d +NOTICE: f_leak => d3d9446802a44259755d38e6d163e820 + a | b +----+---------------------------------- + 2 | c81e728d9d4c2f636f067f89cc14862c + 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 10 | d3d9446802a44259755d38e6d163e820 +(5 rows) + +INSERT INTO bv1 VALUES (-1, 'xxx'); -- should fail view WCO +ERROR: new row violates row-level security policy for table "b1" +INSERT INTO bv1 VALUES (11, 'xxx'); -- should fail RLS check +ERROR: new row violates row-level security 
policy for table "b1" +INSERT INTO bv1 VALUES (12, 'xxx'); -- ok +EXPLAIN (COSTS OFF) UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Update on b1 + Update on _hyper_8_41_chunk b1_1 + -> Result + -> Custom Scan (ChunkAppend) on b1 + Chunks excluded during startup: 0 + -> Index Scan using _hyper_8_41_chunk_b1_a_idx on _hyper_8_41_chunk b1_1 + Index Cond: ((a > 0) AND (a = 4)) + Filter: (((a % 2) = 0) AND f_leak(b)) +(9 rows) + +UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b); +NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c +EXPLAIN (COSTS OFF) DELETE FROM bv1 WHERE a = 6 AND f_leak(b); + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on b1 + Delete on _hyper_8_42_chunk b1_1 + -> Custom Scan (ChunkAppend) on b1 + Chunks excluded during startup: 0 + -> Index Scan using _hyper_8_42_chunk_b1_a_idx on _hyper_8_42_chunk b1_1 + Index Cond: ((a > 0) AND (a = 6)) + Filter: (((a % 2) = 0) AND f_leak(b)) +(8 rows) + +DELETE FROM bv1 WHERE a = 6 AND f_leak(b); +NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM b1; + a | b +-----+---------------------------------- + -10 | 1b0fd9efa5279c4203b7c70233f86dbf + -9 | 252e691406782824eec43d7eadc3d256 + -8 | a8d2ec85eaf98407310b72eb73dda247 + -7 | 74687a12d3915d3c4d83f1af7b3683d5 + -6 | 596a3d04481816330f07e4f97510c28f + -5 | 47c1b025fa18ea96c33fbb6718688c0f + -4 | 0267aaf632e87a63288a08331f22c7c3 + -3 | b3149ecea4628efd23d2f86e5a723472 + -2 | 5d7b9adcbe1c629ec722529dd12e5129 + -1 | 6bb61e3b7bce0931da574d19d1d82c88 + 0 | cfcd208495d565ef66e7dff9f98764da + 1 | c4ca4238a0b923820dcc509a6f75849b + 2 | c81e728d9d4c2f636f067f89cc14862c + 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3 + 5 | e4da3b7fbbce2345d7772b0674a318d5 + 4 | yyy + 7 | 8f14e45fceea167a5a36dedd4bea2543 + 8 | c9f0f895fb98ab9159f51fd0297e236d + 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26 + 10 | d3d9446802a44259755d38e6d163e820 + 12 | xxx +(21 rows) + +-- +-- INSERT ... ON CONFLICT DO UPDATE and Row-level security +-- +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p1 ON document; +DROP POLICY p1r ON document; +CREATE POLICY p1 ON document FOR SELECT USING (true); +CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user); +CREATE POLICY p3 ON document FOR UPDATE + USING (cid = (SELECT cid from category WHERE cname = 'novel')) + WITH CHECK (dauthor = current_user); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Exists... +SELECT * FROM document WHERE did = 2; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+----------------- + 2 | 11 | 2 | regress_rls_bob | my second novel +(1 row) + +-- ...so violates actual WITH CHECK OPTION within UPDATE (not INSERT, since +-- alternative UPDATE path happens to be taken): +INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_carol', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, dauthor = EXCLUDED.dauthor; +ERROR: new row violates row-level security policy for table "document" +-- Violates USING qual for UPDATE policy p3. 
+-- +-- UPDATE path is taken, but UPDATE fails purely because *existing* row to be +-- updated is not a "novel"/cid 11 (row is not leaked, even though we have +-- SELECT privileges sufficient to see the row in this instance): +INSERT INTO document VALUES (33, 22, 1, 'regress_rls_bob', 'okay science fiction'); -- preparation for next statement +INSERT INTO document VALUES (33, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'Some novel, replaces sci-fi') -- takes UPDATE path + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle; +ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Fine (we UPDATE, since INSERT WCOs and UPDATE security barrier quals + WCOs +-- not violated): +INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+---------------- + 2 | 11 | 2 | regress_rls_bob | my first novel +(1 row) + +-- Fine (we INSERT, so "cid = 33" ("technology") isn't evaluated): +INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+----------------------- + 78 | 11 | 1 | regress_rls_bob | some technology novel +(1 row) + +-- Fine (same query, but we UPDATE, so "cid = 33", ("technology") is not the +-- case in respect of *existing* tuple): +INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+----------------------- + 78 | 33 | 1 | regress_rls_bob | some technology novel +(1 row) + +-- Same query a third time, but now fails due to existing tuple finally not +-- passing quals: +INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; +ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Don't fail just because INSERT doesn't satisfy WITH CHECK option that +-- originated as a barrier/USING() qual from the UPDATE. Note that the UPDATE +-- path *isn't* taken, and so UPDATE-related policy does not apply: +INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+---------------------------------- + 79 | 33 | 1 | regress_rls_bob | technology book, can only insert +(1 row) + +-- But this time, the same statement fails, because the UPDATE path is taken, +-- and updating the row just inserted falls afoul of security barrier qual +-- (enforced as WCO) -- what we might have updated target tuple to is +-- irrelevant, in fact. 
+INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; +ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Test default USING qual enforced as WCO +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p1 ON document; +DROP POLICY p2 ON document; +DROP POLICY p3 ON document; +CREATE POLICY p3_with_default ON document FOR UPDATE + USING (cid = (SELECT cid from category WHERE cname = 'novel')); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Just because WCO-style enforcement of USING quals occurs with +-- existing/target tuple does not mean that the implementation can be allowed +-- to fail to also enforce this qual against the final tuple appended to +-- relation (since in the absence of an explicit WCO, this is also interpreted +-- as an UPDATE/ALL WCO in general). +-- +-- UPDATE path is taken here (fails due to existing tuple). Note that this is +-- not reported as a "USING expression", because it's an RLS UPDATE check that originated as +-- a USING qual for the purposes of RLS in general, as opposed to an explicit +-- USING qual that is ordinarily a security barrier. We leave it up to the +-- UPDATE to make this fail: +INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; +ERROR: new row violates row-level security policy for table "document" +-- UPDATE path is taken here. Existing tuple passes, since it's cid +-- corresponds to "novel", but default USING qual is enforced against +-- post-UPDATE tuple too (as always when updating with a policy that lacks an +-- explicit WCO), and so this fails: +INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET cid = EXCLUDED.cid, dtitle = EXCLUDED.dtitle RETURNING *; +ERROR: new row violates row-level security policy for table "document" +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p3_with_default ON document; +-- +-- Test ALL policies with ON CONFLICT DO UPDATE (much the same as existing UPDATE +-- tests) +-- +CREATE POLICY p3_with_all ON document FOR ALL + USING (cid = (SELECT cid from category WHERE cname = 'novel')) + WITH CHECK (dauthor = current_user); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Fails, since ALL WCO is enforced in insert path: +INSERT INTO document VALUES (80, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_carol', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33; +ERROR: new row violates row-level security policy for table "document" +-- Fails, since ALL policy USING qual is enforced (existing, target tuple is in +-- violation, since it has the "manga" cid): +INSERT INTO document VALUES (4, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle; +ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Fails, since ALL WCO are enforced: +INSERT INTO document VALUES (1, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dauthor = 'regress_rls_carol'; +ERROR: new row violates 
row-level security policy for table "document" +-- +-- ROLE/GROUP +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE z1 (a int, b text); +SELECT public.create_hypertable('z1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------- + (9,regress_rls_schema,z1,t) +(1 row) + +CREATE TABLE z2 (a int, b text); +SELECT public.create_hypertable('z2', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (10,regress_rls_schema,z2,t) +(1 row) + +GRANT SELECT ON z1,z2 TO regress_rls_group1, regress_rls_group2, + regress_rls_bob, regress_rls_carol; +INSERT INTO z1 VALUES + (1, 'aba'), + (2, 'bbb'), + (3, 'ccc'), + (4, 'dad'); +CREATE POLICY p1 ON z1 TO regress_rls_group1 USING (a % 2 = 0); +CREATE POLICY p2 ON z1 TO regress_rls_group2 USING (a % 2 = 1); +ALTER TABLE z1 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(10 rows) + +PREPARE plancache_test AS SELECT * FROM z1 WHERE f_leak(b); +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(10 rows) + +PREPARE plancache_test2 AS WITH q AS MATERIALIZED (SELECT * FROM z1 WHERE f_leak(b)) SELECT * FROM q,z2; +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +PREPARE plancache_test4 AS WITH q AS (SELECT * FROM z1 WHERE f_leak(b)) SELECT * FROM q,z2; +EXPLAIN (COSTS OFF) EXECUTE plancache_test4; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + 
Filter: (((a % 2) = 0) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +PREPARE plancache_test6 AS WITH q AS NOT MATERIALIZED (SELECT * FROM z1 WHERE f_leak(b)) SELECT * FROM q,z2; +EXPLAIN (COSTS OFF) EXECUTE plancache_test6; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +PREPARE plancache_test3 AS WITH q AS MATERIALIZED (SELECT * FROM z2) SELECT * FROM q,z1 WHERE f_leak(z1.b); +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(15 rows) + +PREPARE plancache_test5 AS WITH q AS (SELECT * FROM z2) SELECT * FROM q,z1 WHERE f_leak(z1.b); +EXPLAIN (COSTS OFF) EXECUTE plancache_test5; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Seq Scan on z2 + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(13 rows) + +PREPARE plancache_test7 AS WITH q AS NOT MATERIALIZED (SELECT * FROM z2) SELECT * FROM q,z1 WHERE f_leak(z1.b); +EXPLAIN (COSTS OFF) EXECUTE plancache_test7; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Seq Scan on z2 + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(13 rows) + +SET ROLE regress_rls_group1; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(10 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY 
PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(10 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test4; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test5; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Seq Scan on z2 + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(13 rows) + +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => ccc + a | b +---+----- + 1 | aba + 3 | ccc +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(10 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY PLAN +----------------------------------------------- + 
Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(10 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test4; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test5; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Seq Scan on z2 + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(13 rows) + +SET ROLE regress_rls_group2; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => ccc + a | b +---+----- + 1 | aba + 3 | ccc +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(10 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + 
-> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(10 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test4; + QUERY PLAN +------------------------------------------------------- + Nested Loop + CTE q + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(15 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test5; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Seq Scan on z2 + -> Materialize + -> Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 1) AND f_leak(b)) +(13 rows) + +-- +-- Views should follow policy for view owner. +-- +-- View and Table owner are the same. +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); +GRANT SELECT ON rls_view TO regress_rls_bob; +-- Query as role that is not owner of view or table. Should return all records. 
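+-- (Editor's note, not part of the executed test: rls_view and z1 are both
+-- owned by regress_rls_alice, the policies on z1 are evaluated as the view
+-- owner, and owners are exempt from their own policies unless the table is
+-- marked FORCE ROW LEVEL SECURITY. Hence regress_rls_bob sees every row
+-- through the view. A hypothetical way to change that behaviour:
+--   ALTER TABLE z1 FORCE ROW LEVEL SECURITY;  -- owner becomes subject to p1/p2
+-- after which even alice's own views over z1 would return only the rows her
+-- policies allow.)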
+SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +------------------------------------------ + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: f_leak(b) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: f_leak(b) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: f_leak(b) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: f_leak(b) +(10 rows) + +-- Query as view/table owner. Should return all records. +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +------------------------------------------ + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: f_leak(b) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: f_leak(b) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: f_leak(b) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: f_leak(b) +(10 rows) + +DROP VIEW rls_view; +-- View and Table owners are different. +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); +GRANT SELECT ON rls_view TO regress_rls_alice; +-- Query as role that is not owner of view but is owner of table. +-- Should return records based on view owner policies. +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(10 rows) + +-- Query as role that is not owner of table but is owner of view. +-- Should return records based on view owner policies. +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(10 rows) + +-- Query as role that is not the owner of the table or view without permissions. +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for view rls_view +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for view rls_view +-- Query as role that is not the owner of the table or view with permissions. 
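+-- (Editor's note, not part of the executed test: privilege checks and RLS are
+-- layered here. The querying role only needs SELECT on the view itself; the
+-- underlying table z1 is read with the view owner's rights, and the view
+-- owner's policies decide which rows come back. Roughly:
+--   GRANT SELECT ON rls_view TO regress_rls_carol;
+--   -- carol may now query rls_view, yet still sees only the rows permitted
+--   -- by the policy that applies to the view owner, regress_rls_bob.
+-- The permission-denied failures above disappear once this grant exists.)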
+SET SESSION AUTHORIZATION regress_rls_bob; +GRANT SELECT ON rls_view TO regress_rls_carol; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------------- + Custom Scan (ChunkAppend) on z1 + Chunks excluded during startup: 0 + -> Seq Scan on z1 z1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_49_chunk z1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_50_chunk z1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_9_51_chunk z1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) +(10 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +DROP VIEW rls_view; +-- +-- Command specific +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE x1 (a int, b text, c text); +SELECT public.create_hypertable('x1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (11,regress_rls_schema,x1,t) +(1 row) + +GRANT ALL ON x1 TO PUBLIC; +INSERT INTO x1 VALUES + (1, 'abc', 'regress_rls_bob'), + (2, 'bcd', 'regress_rls_bob'), + (3, 'cde', 'regress_rls_carol'), + (4, 'def', 'regress_rls_carol'), + (5, 'efg', 'regress_rls_bob'), + (6, 'fgh', 'regress_rls_bob'), + (7, 'fgh', 'regress_rls_carol'), + (8, 'fgh', 'regress_rls_carol'); +CREATE POLICY p0 ON x1 FOR ALL USING (c = current_user); +CREATE POLICY p1 ON x1 FOR SELECT USING (a % 2 = 0); +CREATE POLICY p2 ON x1 FOR INSERT WITH CHECK (a % 2 = 1); +CREATE POLICY p3 ON x1 FOR UPDATE USING (a % 2 = 0); +CREATE POLICY p4 ON x1 FOR DELETE USING (a < 8); +ALTER TABLE x1 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => efg +NOTICE: f_leak => fgh +NOTICE: f_leak => fgh + a | b | c +---+-----+------------------- + 1 | abc | regress_rls_bob + 2 | bcd | regress_rls_bob + 4 | def | regress_rls_carol + 5 | efg | regress_rls_bob + 6 | fgh | regress_rls_bob + 8 | fgh | regress_rls_carol +(6 rows) + +UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => efg +NOTICE: f_leak => fgh +NOTICE: f_leak => fgh + a | b | c +---+----------+------------------- + 1 | abc_updt | regress_rls_bob + 2 | bcd_updt | regress_rls_bob + 4 | def_updt | regress_rls_carol + 5 | efg_updt | regress_rls_bob + 6 | fgh_updt | regress_rls_bob + 8 | fgh_updt | regress_rls_carol +(6 rows) + +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; +NOTICE: f_leak => cde +NOTICE: f_leak => bcd_updt +NOTICE: f_leak => def_updt +NOTICE: f_leak => fgh +NOTICE: f_leak => fgh_updt +NOTICE: f_leak => fgh_updt + a | b | c +---+----------+------------------- + 2 | bcd_updt | regress_rls_bob + 3 | cde | regress_rls_carol + 4 | def_updt | regress_rls_carol + 6 | fgh_updt | regress_rls_bob + 7 | fgh | regress_rls_carol + 8 | fgh_updt | regress_rls_carol +(6 rows) + +UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => cde +NOTICE: f_leak => bcd_updt +NOTICE: f_leak => def_updt +NOTICE: f_leak => fgh +NOTICE: f_leak => fgh_updt +NOTICE: f_leak => fgh_updt + a | b | c +---+---------------+------------------- + 3 | cde_updt | regress_rls_carol + 2 | bcd_updt_updt | 
regress_rls_bob + 4 | def_updt_updt | regress_rls_carol + 7 | fgh_updt | regress_rls_carol + 6 | fgh_updt_updt | regress_rls_bob + 8 | fgh_updt_updt | regress_rls_carol +(6 rows) + +DELETE FROM x1 WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => cde_updt +NOTICE: f_leak => bcd_updt_updt +NOTICE: f_leak => def_updt_updt +NOTICE: f_leak => fgh_updt +NOTICE: f_leak => fgh_updt_updt +NOTICE: f_leak => fgh_updt_updt + a | b | c +---+---------------+------------------- + 3 | cde_updt | regress_rls_carol + 2 | bcd_updt_updt | regress_rls_bob + 4 | def_updt_updt | regress_rls_carol + 7 | fgh_updt | regress_rls_carol + 6 | fgh_updt_updt | regress_rls_bob + 8 | fgh_updt_updt | regress_rls_carol +(6 rows) + +-- +-- Duplicate Policy Names +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE y1 (a int, b text); +SELECT public.create_hypertable('y1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (12,regress_rls_schema,y1,t) +(1 row) + +INSERT INTO y1 VALUES(1,2); +CREATE TABLE y2 (a int, b text); +SELECT public.create_hypertable('y2', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (13,regress_rls_schema,y2,t) +(1 row) + +GRANT ALL ON y1, y2 TO regress_rls_bob; +CREATE POLICY p1 ON y1 FOR ALL USING (a % 2 = 0); +CREATE POLICY p2 ON y1 FOR SELECT USING (a > 2); +CREATE POLICY p1 ON y1 FOR SELECT USING (a % 2 = 1); --fail +ERROR: policy "p1" for table "y1" already exists +CREATE POLICY p1 ON y2 FOR ALL USING (a % 2 = 0); --OK +ALTER TABLE y1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE y2 ENABLE ROW LEVEL SECURITY; +-- +-- Expression structure with SBV +-- +-- Create view as table owner. RLS should NOT be applied. +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE VIEW rls_sbv WITH (security_barrier) AS + SELECT * FROM y1 WHERE f_leak(b); +EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1); + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on y1 + Chunks excluded during startup: 0 + -> Seq Scan on y1 y1_1 + Filter: (f_leak(b) AND (a = 1)) + -> Index Scan using _hyper_12_57_chunk_y1_a_idx on _hyper_12_57_chunk y1_2 + Index Cond: (a = 1) + Filter: f_leak(b) +(7 rows) + +DROP VIEW rls_sbv; +-- Create view as role that does not own table. RLS should be applied. 
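+-- (Editor's note, not part of the executed test: because regress_rls_bob does
+-- not own y1, the policies on y1 are applied with bob as the policy user when
+-- this security-barrier view is queried. Permissive policies on one table are
+-- OR-ed together, so the effective row filter under the view is roughly:
+--   ((a > 2) OR (a % 2 = 0))   -- p2 OR p1 on y1
+-- and the leaky f_leak(b) predicate is only evaluated after those security
+-- quals, as the EXPLAIN output below shows.)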
+SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW rls_sbv WITH (security_barrier) AS + SELECT * FROM y1 WHERE f_leak(b); +EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1); + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on y1 + Chunks excluded during startup: 0 + -> Seq Scan on y1 y1_1 + Filter: ((a = 1) AND ((a > 2) OR ((a % 2) = 0)) AND f_leak(b)) + -> Index Scan using _hyper_12_57_chunk_y1_a_idx on _hyper_12_57_chunk y1_2 + Index Cond: (a = 1) + Filter: (((a > 2) OR ((a % 2) = 0)) AND f_leak(b)) +(7 rows) + +DROP VIEW rls_sbv; +-- +-- Expression structure +-- +SET SESSION AUTHORIZATION regress_rls_alice; +INSERT INTO y2 (SELECT x, md5(x::text) FROM generate_series(0,20) x); +CREATE POLICY p2 ON y2 USING (a % 3 = 0); +CREATE POLICY p3 ON y2 USING (a % 4 = 0); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM y2 WHERE f_leak(b); +NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da +NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c +NOTICE: f_leak => eccbc87e4b5ce2fe28308fd9f2a7baf3 +NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c +NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc +NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d +NOTICE: f_leak => 45c48cce2e2d7fbdea1afc51c7c6ad26 +NOTICE: f_leak => d3d9446802a44259755d38e6d163e820 +NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710 +NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56 +NOTICE: f_leak => 9bf31c7ff062936a96d3c8bd1f8f2ff3 +NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf +NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23 +NOTICE: f_leak => 98f13708210194c475687be6106a3b84 + a | b +----+---------------------------------- + 0 | cfcd208495d565ef66e7dff9f98764da + 2 | c81e728d9d4c2f636f067f89cc14862c + 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3 + 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26 + 10 | d3d9446802a44259755d38e6d163e820 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3 + 16 | c74d97b01eae257e44aa9d5bade97baf + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on y2 + Chunks excluded during startup: 0 + -> Seq Scan on y2 y2_1 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_58_chunk y2_2 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_59_chunk y2_3 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_60_chunk y2_4 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_61_chunk y2_5 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_62_chunk y2_6 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_63_chunk y2_7 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_64_chunk y2_8 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_65_chunk y2_9 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq 
Scan on _hyper_13_66_chunk y2_10 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_67_chunk y2_11 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_68_chunk y2_12 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) +(26 rows) + +-- +-- Qual push-down of leaky functions, when not referring to table +-- +SELECT * FROM y2 WHERE f_leak('abc'); +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc + a | b +----+---------------------------------- + 0 | cfcd208495d565ef66e7dff9f98764da + 2 | c81e728d9d4c2f636f067f89cc14862c + 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3 + 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26 + 10 | d3d9446802a44259755d38e6d163e820 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3 + 16 | c74d97b01eae257e44aa9d5bade97baf + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak('abc'); + QUERY PLAN +--------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on y2 + Chunks excluded during startup: 0 + -> Seq Scan on y2 y2_1 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_58_chunk y2_2 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_59_chunk y2_3 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_60_chunk y2_4 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_61_chunk y2_5 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_62_chunk y2_6 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_63_chunk y2_7 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_64_chunk y2_8 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_65_chunk y2_9 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_66_chunk y2_10 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_67_chunk y2_11 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) + -> Seq Scan on _hyper_13_68_chunk y2_12 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) +(26 rows) + +CREATE TABLE test_qual_pushdown ( + abc text +); +INSERT INTO test_qual_pushdown VALUES ('abc'),('def'); +SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc); +NOTICE: f_leak => abc +NOTICE: f_leak => 
def + a | b | abc +---+---+----- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc); + QUERY PLAN +------------------------------------------------------------------------- + Hash Join + Hash Cond: (y2.b = test_qual_pushdown.abc) + -> Append + -> Seq Scan on y2 y2_1 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_58_chunk y2_2 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_59_chunk y2_3 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_60_chunk y2_4 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_61_chunk y2_5 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_62_chunk y2_6 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_63_chunk y2_7 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_64_chunk y2_8 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_65_chunk y2_9 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_66_chunk y2_10 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_67_chunk y2_11 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Seq Scan on _hyper_13_68_chunk y2_12 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) + -> Hash + -> Seq Scan on test_qual_pushdown + Filter: f_leak(abc) +(30 rows) + +SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b); +NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da +NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c +NOTICE: f_leak => eccbc87e4b5ce2fe28308fd9f2a7baf3 +NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c +NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc +NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d +NOTICE: f_leak => 45c48cce2e2d7fbdea1afc51c7c6ad26 +NOTICE: f_leak => d3d9446802a44259755d38e6d163e820 +NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710 +NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56 +NOTICE: f_leak => 9bf31c7ff062936a96d3c8bd1f8f2ff3 +NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf +NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23 +NOTICE: f_leak => 98f13708210194c475687be6106a3b84 + a | b | abc +---+---+----- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Hash Join + Hash Cond: (test_qual_pushdown.abc = y2.b) + -> Seq Scan on test_qual_pushdown + -> Hash + -> Custom Scan (ChunkAppend) on y2 + Chunks excluded during startup: 0 + -> Seq Scan on y2 y2_1 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_58_chunk y2_2 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_59_chunk y2_3 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_60_chunk y2_4 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_61_chunk y2_5 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_62_chunk y2_6 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_63_chunk y2_7 + Filter: ((((a % 4) = 0) OR ((a 
% 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_64_chunk y2_8 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_65_chunk y2_9 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_66_chunk y2_10 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_67_chunk y2_11 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) + -> Seq Scan on _hyper_13_68_chunk y2_12 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) +(30 rows) + +DROP TABLE test_qual_pushdown; +-- +-- Plan cache invalidation on user change. +-- +RESET SESSION AUTHORIZATION; +\set VERBOSITY terse \\ -- suppress cascade details +DROP TABLE t1 CASCADE; +NOTICE: drop cascades to 2 other objects +\set VERBOSITY default +CREATE TABLE t1 (a integer); +SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (14,regress_rls_schema,t1,t) +(1 row) + +GRANT SELECT ON t1 TO regress_rls_bob, regress_rls_carol; +CREATE POLICY p1 ON t1 TO regress_rls_bob USING ((a % 2) = 0); +CREATE POLICY p2 ON t1 TO regress_rls_carol USING ((a % 4) = 0); +ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; +-- Prepare as regress_rls_bob +SET ROLE regress_rls_bob; +PREPARE role_inval AS SELECT * FROM t1; +-- Check plan +EXPLAIN (COSTS OFF) EXECUTE role_inval; + QUERY PLAN +------------------------- + Seq Scan on t1 + Filter: ((a % 2) = 0) +(2 rows) + +-- Change to regress_rls_carol +SET ROLE regress_rls_carol; +-- Check plan - should be different +EXPLAIN (COSTS OFF) EXECUTE role_inval; + QUERY PLAN +------------------------- + Seq Scan on t1 + Filter: ((a % 4) = 0) +(2 rows) + +-- Change back to regress_rls_bob +SET ROLE regress_rls_bob; +-- Check plan - should be back to original +EXPLAIN (COSTS OFF) EXECUTE role_inval; + QUERY PLAN +------------------------- + Seq Scan on t1 + Filter: ((a % 2) = 0) +(2 rows) + +-- +-- CTE and RLS +-- +RESET SESSION AUTHORIZATION; +DROP TABLE t1 CASCADE; +CREATE TABLE t1 (a integer, b text); +SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values.
+ create_hypertable +------------------------------ + (15,regress_rls_schema,t1,t) +(1 row) + +CREATE POLICY p1 ON t1 USING (a % 2 = 0); +ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; +GRANT ALL ON t1 TO regress_rls_bob; +INSERT INTO t1 (SELECT x, md5(x::text) FROM generate_series(0,20) x); +SET SESSION AUTHORIZATION regress_rls_bob; +WITH cte1 AS (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1; +NOTICE: f_leak => cfcd208495d565ef66e7dff9f98764da +NOTICE: f_leak => c81e728d9d4c2f636f067f89cc14862c +NOTICE: f_leak => a87ff679a2f3e71d9181a67b7542122c +NOTICE: f_leak => 1679091c5a880faf6fb5e6087eb1b2dc +NOTICE: f_leak => c9f0f895fb98ab9159f51fd0297e236d +NOTICE: f_leak => d3d9446802a44259755d38e6d163e820 +NOTICE: f_leak => c20ad4d76fe97759aa27a0c99bff6710 +NOTICE: f_leak => aab3238922bcc25a6f606eb525ffdc56 +NOTICE: f_leak => c74d97b01eae257e44aa9d5bade97baf +NOTICE: f_leak => 6f4922f45568161a8cdf4ad2299f6d23 +NOTICE: f_leak => 98f13708210194c475687be6106a3b84 + a | b +----+---------------------------------- + 0 | cfcd208495d565ef66e7dff9f98764da + 2 | c81e728d9d4c2f636f067f89cc14862c + 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 10 | d3d9446802a44259755d38e6d163e820 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 16 | c74d97b01eae257e44aa9d5bade97baf + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 +(11 rows) + +EXPLAIN (COSTS OFF) WITH cte1 AS (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1; + QUERY PLAN +------------------------------------------------------- + CTE Scan on cte1 + CTE cte1 + -> Custom Scan (ChunkAppend) on t1 + Chunks excluded during startup: 0 + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_69_chunk t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_70_chunk t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_71_chunk t1_4 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_72_chunk t1_5 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_73_chunk t1_6 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_74_chunk t1_7 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_75_chunk t1_8 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_76_chunk t1_9 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_77_chunk t1_10 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_78_chunk t1_11 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on _hyper_15_79_chunk t1_12 + Filter: (((a % 2) = 0) AND f_leak(b)) +(28 rows) + +WITH cte1 AS (UPDATE t1 SET a = a + 1 RETURNING *) SELECT * FROM cte1; --fail +ERROR: new row violates row-level security policy for table "t1" +WITH cte1 AS (UPDATE t1 SET a = a RETURNING *) SELECT * FROM cte1; --ok + a | b +----+---------------------------------- + 0 | cfcd208495d565ef66e7dff9f98764da + 2 | c81e728d9d4c2f636f067f89cc14862c + 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 10 | d3d9446802a44259755d38e6d163e820 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 16 | c74d97b01eae257e44aa9d5bade97baf + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 +(11 rows) + +WITH cte1 AS (INSERT INTO t1 VALUES (21, 'Fail') RETURNING *) SELECT * FROM cte1; --fail +ERROR: new row violates row-level security 
policy for table "t1" +WITH cte1 AS (INSERT INTO t1 VALUES (20, 'Success') RETURNING *) SELECT * FROM cte1; --ok + a | b +----+--------- + 20 | Success +(1 row) + +-- +-- Rename Policy +-- +RESET SESSION AUTHORIZATION; +ALTER POLICY p1 ON t1 RENAME TO p1; --fail +ERROR: policy "p1" for table "t1" already exists +SELECT polname, relname + FROM pg_policy pol + JOIN pg_class pc ON (pc.oid = pol.polrelid) + WHERE relname = 't1'; + polname | relname +---------+--------- + p1 | t1 +(1 row) + +ALTER POLICY p1 ON t1 RENAME TO p2; --ok +SELECT polname, relname + FROM pg_policy pol + JOIN pg_class pc ON (pc.oid = pol.polrelid) + WHERE relname = 't1'; + polname | relname +---------+--------- + p2 | t1 +(1 row) + +-- +-- Check INSERT SELECT +-- +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE TABLE t2 (a integer, b text); +SELECT public.create_hypertable('t2', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (16,regress_rls_schema,t2,t) +(1 row) + +INSERT INTO t2 (SELECT * FROM t1); +EXPLAIN (COSTS OFF) INSERT INTO t2 (SELECT * FROM t1); + QUERY PLAN +-------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on t2 + -> Custom Scan (ChunkDispatch) + -> Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_69_chunk t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_70_chunk t1_3 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_71_chunk t1_4 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_72_chunk t1_5 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_73_chunk t1_6 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_74_chunk t1_7 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_75_chunk t1_8 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_76_chunk t1_9 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_77_chunk t1_10 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_78_chunk t1_11 + Filter: ((a % 2) = 0) + -> Seq Scan on _hyper_15_79_chunk t1_12 + Filter: ((a % 2) = 0) +(28 rows) + +SELECT * FROM t2; + a | b +----+---------------------------------- + 0 | cfcd208495d565ef66e7dff9f98764da + 2 | c81e728d9d4c2f636f067f89cc14862c + 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 10 | d3d9446802a44259755d38e6d163e820 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 16 | c74d97b01eae257e44aa9d5bade97baf + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 + 20 | Success +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t2; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on t2 t2_1 + -> Seq Scan on _hyper_16_80_chunk t2_2 + -> Seq Scan on _hyper_16_81_chunk t2_3 + -> Seq Scan on _hyper_16_82_chunk t2_4 + -> Seq Scan on _hyper_16_83_chunk t2_5 + -> Seq Scan on _hyper_16_84_chunk t2_6 + -> Seq Scan on _hyper_16_85_chunk t2_7 + -> Seq Scan on _hyper_16_86_chunk t2_8 + -> Seq Scan on _hyper_16_87_chunk t2_9 + -> Seq Scan on _hyper_16_88_chunk t2_10 + -> Seq Scan on _hyper_16_89_chunk t2_11 + -> Seq Scan on _hyper_16_90_chunk t2_12 +(13 rows) + +CREATE TABLE t3 AS SELECT * FROM t1; +SELECT public.create_hypertable('t2', 'a', chunk_time_interval=>2); +ERROR: table "t2" is already a hypertable +SELECT * FROM t3; + a | b +----+---------------------------------- + 0 | cfcd208495d565ef66e7dff9f98764da + 2 | c81e728d9d4c2f636f067f89cc14862c 
+ 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 10 | d3d9446802a44259755d38e6d163e820 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 16 | c74d97b01eae257e44aa9d5bade97baf + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 + 20 | Success +(12 rows) + +SELECT * INTO t4 FROM t1; +SELECT * FROM t4; + a | b +----+---------------------------------- + 0 | cfcd208495d565ef66e7dff9f98764da + 2 | c81e728d9d4c2f636f067f89cc14862c + 4 | a87ff679a2f3e71d9181a67b7542122c + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 8 | c9f0f895fb98ab9159f51fd0297e236d + 10 | d3d9446802a44259755d38e6d163e820 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 16 | c74d97b01eae257e44aa9d5bade97baf + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 + 20 | Success +(12 rows) + +-- +-- RLS with JOIN +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE blog (id integer, author text, post text); +SELECT public.create_hypertable('blog', 'id', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "id" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +-------------------------------- + (17,regress_rls_schema,blog,t) +(1 row) + +CREATE TABLE comment (blog_id integer, message text); +SELECT public.create_hypertable('comment', 'blog_id', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "blog_id" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------------- + (18,regress_rls_schema,comment,t) +(1 row) + +GRANT ALL ON blog, comment TO regress_rls_bob; +CREATE POLICY blog_1 ON blog USING (id % 2 = 0); +ALTER TABLE blog ENABLE ROW LEVEL SECURITY; +INSERT INTO blog VALUES + (1, 'alice', 'blog #1'), + (2, 'bob', 'blog #1'), + (3, 'alice', 'blog #2'), + (4, 'alice', 'blog #3'), + (5, 'john', 'blog #1'); +INSERT INTO comment VALUES + (1, 'cool blog'), + (1, 'fun blog'), + (3, 'crazy blog'), + (5, 'what?'), + (4, 'insane!'), + (2, 'who did it?'); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Check RLS JOIN with Non-RLS. +SELECT id, author, message FROM blog JOIN comment ON id = blog_id; + id | author | message +----+--------+------------- + 2 | bob | who did it? + 4 | alice | insane! +(2 rows) + +-- Check Non-RLS JOIN with RLS. +SELECT id, author, message FROM comment JOIN blog ON id = blog_id; + id | author | message +----+--------+------------- + 2 | bob | who did it? + 4 | alice | insane! +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE POLICY comment_1 ON comment USING (blog_id < 4); +ALTER TABLE comment ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +-- Check RLS JOIN RLS +SELECT id, author, message FROM blog JOIN comment ON id = blog_id; + id | author | message +----+--------+------------- + 2 | bob | who did it? +(1 row) + +SELECT id, author, message FROM comment JOIN blog ON id = blog_id; + id | author | message +----+--------+------------- + 2 | bob | who did it? +(1 row) + +SET SESSION AUTHORIZATION regress_rls_alice; +DROP TABLE blog; +DROP TABLE comment; +-- +-- Default Deny Policy +-- +RESET SESSION AUTHORIZATION; +DROP POLICY p2 ON t1; +ALTER TABLE t1 OWNER TO regress_rls_alice; +-- Check that default deny does not apply to superuser. 
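+-- (Editor's note, not part of the executed test: t1 still has ROW LEVEL
+-- SECURITY enabled but, after DROP POLICY p2 above, no policies at all. In
+-- that state ordinary users get a default-deny qual of constant false, while
+-- superusers and the table owner bypass RLS entirely (unless the table is
+-- marked FORCE ROW LEVEL SECURITY). The default-deny case looks roughly like:
+--   SET ROLE regress_rls_bob;
+--   EXPLAIN (COSTS OFF) SELECT * FROM t1;  -- One-Time Filter: false
+-- which matches the regress_rls_bob plans shown further below.)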
+RESET SESSION AUTHORIZATION; +SELECT * FROM t1; + a | b +----+---------------------------------- + 1 | c4ca4238a0b923820dcc509a6f75849b + 0 | cfcd208495d565ef66e7dff9f98764da + 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3 + 2 | c81e728d9d4c2f636f067f89cc14862c + 5 | e4da3b7fbbce2345d7772b0674a318d5 + 4 | a87ff679a2f3e71d9181a67b7542122c + 7 | 8f14e45fceea167a5a36dedd4bea2543 + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26 + 8 | c9f0f895fb98ab9159f51fd0297e236d + 11 | 6512bd43d9caa6e02c990b0a82652dca + 10 | d3d9446802a44259755d38e6d163e820 + 13 | c51ce410c124a10e0db5e4b97fc2af39 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 17 | 70efdf2ec9b086079795c442636b55fb + 16 | c74d97b01eae257e44aa9d5bade97baf + 19 | 1f0e3dad99908345f7439f8ffabdffc4 + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 + 20 | Success +(22 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + -> Seq Scan on _hyper_15_69_chunk t1_2 + -> Seq Scan on _hyper_15_70_chunk t1_3 + -> Seq Scan on _hyper_15_71_chunk t1_4 + -> Seq Scan on _hyper_15_72_chunk t1_5 + -> Seq Scan on _hyper_15_73_chunk t1_6 + -> Seq Scan on _hyper_15_74_chunk t1_7 + -> Seq Scan on _hyper_15_75_chunk t1_8 + -> Seq Scan on _hyper_15_76_chunk t1_9 + -> Seq Scan on _hyper_15_77_chunk t1_10 + -> Seq Scan on _hyper_15_78_chunk t1_11 + -> Seq Scan on _hyper_15_79_chunk t1_12 +(13 rows) + +-- Check that default deny does not apply to table owner. +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM t1; + a | b +----+---------------------------------- + 1 | c4ca4238a0b923820dcc509a6f75849b + 0 | cfcd208495d565ef66e7dff9f98764da + 3 | eccbc87e4b5ce2fe28308fd9f2a7baf3 + 2 | c81e728d9d4c2f636f067f89cc14862c + 5 | e4da3b7fbbce2345d7772b0674a318d5 + 4 | a87ff679a2f3e71d9181a67b7542122c + 7 | 8f14e45fceea167a5a36dedd4bea2543 + 6 | 1679091c5a880faf6fb5e6087eb1b2dc + 9 | 45c48cce2e2d7fbdea1afc51c7c6ad26 + 8 | c9f0f895fb98ab9159f51fd0297e236d + 11 | 6512bd43d9caa6e02c990b0a82652dca + 10 | d3d9446802a44259755d38e6d163e820 + 13 | c51ce410c124a10e0db5e4b97fc2af39 + 12 | c20ad4d76fe97759aa27a0c99bff6710 + 15 | 9bf31c7ff062936a96d3c8bd1f8f2ff3 + 14 | aab3238922bcc25a6f606eb525ffdc56 + 17 | 70efdf2ec9b086079795c442636b55fb + 16 | c74d97b01eae257e44aa9d5bade97baf + 19 | 1f0e3dad99908345f7439f8ffabdffc4 + 18 | 6f4922f45568161a8cdf4ad2299f6d23 + 20 | 98f13708210194c475687be6106a3b84 + 20 | Success +(22 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + -> Seq Scan on _hyper_15_69_chunk t1_2 + -> Seq Scan on _hyper_15_70_chunk t1_3 + -> Seq Scan on _hyper_15_71_chunk t1_4 + -> Seq Scan on _hyper_15_72_chunk t1_5 + -> Seq Scan on _hyper_15_73_chunk t1_6 + -> Seq Scan on _hyper_15_74_chunk t1_7 + -> Seq Scan on _hyper_15_75_chunk t1_8 + -> Seq Scan on _hyper_15_76_chunk t1_9 + -> Seq Scan on _hyper_15_77_chunk t1_10 + -> Seq Scan on _hyper_15_78_chunk t1_11 + -> Seq Scan on _hyper_15_79_chunk t1_12 +(13 rows) + +-- Check that default deny applies to non-owner/non-superuser when RLS on. 
+SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +SELECT * FROM t1; + a | b +---+--- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM t1; + a | b +---+--- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- +-- COPY TO/FROM +-- +RESET SESSION AUTHORIZATION; +DROP TABLE copy_t CASCADE; +ERROR: table "copy_t" does not exist +CREATE TABLE copy_t (a integer, b text); +SELECT public.create_hypertable('copy_t', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +---------------------------------- + (19,regress_rls_schema,copy_t,t) +(1 row) + +CREATE POLICY p1 ON copy_t USING (a % 2 = 0); +ALTER TABLE copy_t ENABLE ROW LEVEL SECURITY; +GRANT ALL ON copy_t TO regress_rls_bob, regress_rls_exempt_user; +INSERT INTO copy_t (SELECT x, md5(x::text) FROM generate_series(0,10) x); +-- Check COPY TO as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; +0,cfcd208495d565ef66e7dff9f98764da +1,c4ca4238a0b923820dcc509a6f75849b +2,c81e728d9d4c2f636f067f89cc14862c +3,eccbc87e4b5ce2fe28308fd9f2a7baf3 +4,a87ff679a2f3e71d9181a67b7542122c +5,e4da3b7fbbce2345d7772b0674a318d5 +6,1679091c5a880faf6fb5e6087eb1b2dc +7,8f14e45fceea167a5a36dedd4bea2543 +8,c9f0f895fb98ab9159f51fd0297e236d +9,45c48cce2e2d7fbdea1afc51c7c6ad26 +10,d3d9446802a44259755d38e6d163e820 +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; +0,cfcd208495d565ef66e7dff9f98764da +1,c4ca4238a0b923820dcc509a6f75849b +2,c81e728d9d4c2f636f067f89cc14862c +3,eccbc87e4b5ce2fe28308fd9f2a7baf3 +4,a87ff679a2f3e71d9181a67b7542122c +5,e4da3b7fbbce2345d7772b0674a318d5 +6,1679091c5a880faf6fb5e6087eb1b2dc +7,8f14e45fceea167a5a36dedd4bea2543 +8,c9f0f895fb98ab9159f51fd0297e236d +9,45c48cce2e2d7fbdea1afc51c7c6ad26 +10,d3d9446802a44259755d38e6d163e820 +-- Check COPY TO as user with permissions. 
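+-- (Editor's note, not part of the executed test: row_security = off never
+-- silently disables RLS for a role that is subject to it; the query is
+-- rejected outright instead, so flipping the GUC cannot leak rows. The two
+-- outcomes exercised below are roughly:
+--   SET row_security TO off;
+--   COPY (SELECT * FROM copy_t) TO STDOUT;  -- ERROR: would be affected by RLS
+--   SET row_security TO on;
+--   COPY (SELECT * FROM copy_t) TO STDOUT;  -- ok, only rows where a % 2 = 0
+-- Only superusers, the table owner, and BYPASSRLS roles can meaningfully run
+-- with row_security = off.)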
+SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_t" +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok +0,cfcd208495d565ef66e7dff9f98764da +2,c81e728d9d4c2f636f067f89cc14862c +4,a87ff679a2f3e71d9181a67b7542122c +6,1679091c5a880faf6fb5e6087eb1b2dc +8,c9f0f895fb98ab9159f51fd0297e236d +10,d3d9446802a44259755d38e6d163e820 +-- Check COPY TO as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok +0,cfcd208495d565ef66e7dff9f98764da +1,c4ca4238a0b923820dcc509a6f75849b +2,c81e728d9d4c2f636f067f89cc14862c +3,eccbc87e4b5ce2fe28308fd9f2a7baf3 +4,a87ff679a2f3e71d9181a67b7542122c +5,e4da3b7fbbce2345d7772b0674a318d5 +6,1679091c5a880faf6fb5e6087eb1b2dc +7,8f14e45fceea167a5a36dedd4bea2543 +8,c9f0f895fb98ab9159f51fd0297e236d +9,45c48cce2e2d7fbdea1afc51c7c6ad26 +10,d3d9446802a44259755d38e6d163e820 +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok +0,cfcd208495d565ef66e7dff9f98764da +1,c4ca4238a0b923820dcc509a6f75849b +2,c81e728d9d4c2f636f067f89cc14862c +3,eccbc87e4b5ce2fe28308fd9f2a7baf3 +4,a87ff679a2f3e71d9181a67b7542122c +5,e4da3b7fbbce2345d7772b0674a318d5 +6,1679091c5a880faf6fb5e6087eb1b2dc +7,8f14e45fceea167a5a36dedd4bea2543 +8,c9f0f895fb98ab9159f51fd0297e236d +9,45c48cce2e2d7fbdea1afc51c7c6ad26 +10,d3d9446802a44259755d38e6d163e820 +-- Check COPY TO as user without permissions. SET row_security TO OFF; +SET SESSION AUTHORIZATION regress_rls_carol; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_t" +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for table copy_t +-- Check COPY relation TO; keep it just one row to avoid reordering issues +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +CREATE TABLE copy_rel_to (a integer, b text); +SELECT public.create_hypertable('copy_rel_to', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +--------------------------------------- + (20,regress_rls_schema,copy_rel_to,t) +(1 row) + +CREATE POLICY p1 ON copy_rel_to USING (a % 2 = 0); +ALTER TABLE copy_rel_to ENABLE ROW LEVEL SECURITY; +GRANT ALL ON copy_rel_to TO regress_rls_bob, regress_rls_exempt_user; +INSERT INTO copy_rel_to VALUES (1, md5('1')); +-- Check COPY TO as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; +1,c4ca4238a0b923820dcc509a6f75849b +SET row_security TO ON; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; +1,c4ca4238a0b923820dcc509a6f75849b +-- Check COPY TO as user with permissions. 
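+-- (Editor's note, not part of the executed test: copy_rel_to holds a single
+-- row with a = 1, which fails the p1 qual (a % 2 = 0). So for regress_rls_bob
+-- the row_security = on COPY below succeeds but emits no rows, while the
+-- BYPASSRLS role regress_rls_exempt_user still sees the row:
+--   COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ',';
+--   -- as regress_rls_bob:         (no output)
+--   -- as regress_rls_exempt_user: 1,c4ca4238a0b923820dcc509a6f75849b
+-- Both behaviours appear verbatim in the output below.)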
+SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO OFF; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_rel_to" +SET row_security TO ON; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; --ok +-- Check COPY TO as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; --ok +1,c4ca4238a0b923820dcc509a6f75849b +SET row_security TO ON; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; --ok +1,c4ca4238a0b923820dcc509a6f75849b +-- Check COPY TO as user without permissions. SET row_security TO OFF; +SET SESSION AUTHORIZATION regress_rls_carol; +SET row_security TO OFF; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_rel_to" +SET row_security TO ON; +COPY (SELECT * FROM copy_rel_to) TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for table copy_rel_to +-- Check COPY FROM as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY copy_t FROM STDIN; --ok +SET row_security TO ON; +COPY copy_t FROM STDIN; --ok +-- Check COPY FROM as user with permissions. +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO OFF; +COPY copy_t FROM STDIN; --fail - would be affected by RLS. +ERROR: query would be affected by row-level security policy for table "copy_t" +SET row_security TO ON; +COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS. +ERROR: COPY FROM not supported with row-level security +HINT: Use INSERT statements instead. +-- Check COPY FROM as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO ON; +COPY copy_t FROM STDIN; --ok +-- Check COPY FROM as user without permissions. +SET SESSION AUTHORIZATION regress_rls_carol; +SET row_security TO OFF; +COPY copy_t FROM STDIN; --fail - permission denied. +ERROR: permission denied for table copy_t +SET row_security TO ON; +COPY copy_t FROM STDIN; --fail - permission denied. +ERROR: permission denied for table copy_t +RESET SESSION AUTHORIZATION; +DROP TABLE copy_t; +DROP TABLE copy_rel_to CASCADE; +-- Check WHERE CURRENT OF +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE current_check (currentid int, payload text, rlsuser text); +SELECT public.create_hypertable('current_check', 'currentid', chunk_time_interval=>10); +NOTICE: adding not-null constraint to column "currentid" +DETAIL: Dimensions cannot have NULL values.
+ create_hypertable +----------------------------------------- + (21,regress_rls_schema,current_check,t) +(1 row) + +GRANT ALL ON current_check TO PUBLIC; +INSERT INTO current_check VALUES + (1, 'abc', 'regress_rls_bob'), + (2, 'bcd', 'regress_rls_bob'), + (3, 'cde', 'regress_rls_bob'), + (4, 'def', 'regress_rls_bob'); +CREATE POLICY p1 ON current_check FOR SELECT USING (currentid % 2 = 0); +CREATE POLICY p2 ON current_check FOR DELETE USING (currentid = 4 AND rlsuser = current_user); +CREATE POLICY p3 ON current_check FOR UPDATE USING (currentid = 4) WITH CHECK (rlsuser = current_user); +ALTER TABLE current_check ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +-- Can SELECT even rows +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob + 4 | def | regress_rls_bob +(2 rows) + +-- Cannot UPDATE row 2 +UPDATE current_check SET payload = payload || '_new' WHERE currentid = 2 RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +BEGIN; +-- WHERE CURRENT OF does not work with custom scan nodes +-- so we have to disable chunk append here +SET timescaledb.enable_chunk_append TO false; +DECLARE current_check_cursor SCROLL CURSOR FOR SELECT * FROM current_check; +-- Returns rows that can be seen according to SELECT policy, like plain SELECT +-- above (even rows) +FETCH ABSOLUTE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob +(1 row) + +-- Still cannot UPDATE row 2 through cursor +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +-- Can update row 4 through cursor, which is the next visible row +FETCH RELATIVE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+----------------- + 4 | def | regress_rls_bob +(1 row) + +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+----------------- + 4 | def_new | regress_rls_bob +(1 row) + +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob + 4 | def_new | regress_rls_bob +(2 rows) + +-- Plan should be a subquery TID scan +EXPLAIN (COSTS OFF) UPDATE current_check SET payload = payload WHERE CURRENT OF current_check_cursor; + QUERY PLAN +------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Update on current_check + Update on _hyper_21_104_chunk current_check_1 + -> Tid Scan on _hyper_21_104_chunk current_check_1 + TID Cond: CURRENT OF current_check_cursor + Filter: ((currentid = 4) AND ((currentid % 2) = 0)) +(6 rows) + +-- Similarly can only delete row 4 +FETCH ABSOLUTE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob +(1 row) + +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +FETCH RELATIVE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+----------------- + 4 | def | regress_rls_bob +(1 row) + +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser 
+-----------+---------+----------------- + 4 | def_new | regress_rls_bob +(1 row) + +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob +(1 row) + +RESET timescaledb.enable_chunk_append; +COMMIT; +-- +-- check pg_stats view filtering +-- +SET row_security TO ON; +SET SESSION AUTHORIZATION regress_rls_alice; +ANALYZE current_check; +-- Stats visible +SELECT row_security_active('current_check'); + row_security_active +--------------------- + f +(1 row) + +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; + attname | most_common_vals +-----------+------------------- + currentid | + payload | + rlsuser | {regress_rls_bob} +(3 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +-- Stats not visible +SELECT row_security_active('current_check'); + row_security_active +--------------------- + t +(1 row) + +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; + attname | most_common_vals +---------+------------------ +(0 rows) + +-- +-- Collation support +-- +BEGIN; +CREATE TABLE coll_t (c) AS VALUES ('bar'::text); +CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); +ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON coll_t TO regress_rls_alice; +SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass; + inputcollid +------------------ + inputcollid 950 +(1 row) + +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM coll_t; + c +----- + bar +(1 row) + +ROLLBACK; +-- +-- Shared Object Dependencies +-- +RESET SESSION AUTHORIZATION; +BEGIN; +CREATE ROLE regress_rls_eve; +CREATE ROLE regress_rls_frank; +CREATE TABLE tbl1 (c) AS VALUES ('bar'::text); +GRANT SELECT ON TABLE tbl1 TO regress_rls_eve; +CREATE POLICY P ON tbl1 TO regress_rls_eve, regress_rls_frank USING (true); +SELECT refclassid::regclass, deptype + FROM pg_depend + WHERE classid = 'pg_policy'::regclass + AND refobjid = 'tbl1'::regclass; + refclassid | deptype +------------+--------- + pg_class | a +(1 row) + +SELECT refclassid::regclass, deptype + FROM pg_shdepend + WHERE classid = 'pg_policy'::regclass + AND refobjid IN ('regress_rls_eve'::regrole, 'regress_rls_frank'::regrole); + refclassid | deptype +------------+--------- + pg_authid | r + pg_authid | r +(2 rows) + +SAVEPOINT q; +DROP ROLE regress_rls_eve; --fails due to dependency on POLICY p +ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it +DETAIL: privileges for table tbl1 +target of policy p on table tbl1 +ROLLBACK TO q; +ALTER POLICY p ON tbl1 TO regress_rls_frank USING (true); +SAVEPOINT q; +DROP ROLE regress_rls_eve; --fails due to dependency on GRANT SELECT +ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it +DETAIL: privileges for table tbl1 +ROLLBACK TO q; +REVOKE ALL ON TABLE tbl1 FROM regress_rls_eve; +SAVEPOINT q; +DROP ROLE regress_rls_eve; --succeeds +ROLLBACK TO q; +SAVEPOINT q; +DROP ROLE regress_rls_frank; --fails due to dependency on POLICY p +ERROR: role "regress_rls_frank" cannot be dropped because some objects depend on it +DETAIL: target of policy p on table tbl1 +ROLLBACK TO q; +DROP POLICY p ON tbl1; +SAVEPOINT q; +DROP ROLE regress_rls_frank; -- succeeds +ROLLBACK TO q; +ROLLBACK; -- cleanup +-- +-- Converting table to view +-- +BEGIN; +CREATE TABLE t (c int); +SELECT public.create_hypertable('t', 'c', chunk_time_interval=>2); +NOTICE: 
adding not-null constraint to column "c" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------- + (22,regress_rls_schema,t,t) +(1 row) + +CREATE POLICY p ON t USING (c % 2 = 1); +ALTER TABLE t ENABLE ROW LEVEL SECURITY; +SAVEPOINT q; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- fails due to row level security enabled +ERROR: hypertables do not support rules +ROLLBACK TO q; +ALTER TABLE t DISABLE ROW LEVEL SECURITY; +SAVEPOINT q; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- fails due to policy p on t +ERROR: hypertables do not support rules +ROLLBACK TO q; +DROP POLICY p ON t; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- succeeds +ERROR: hypertables do not support rules +ROLLBACK; +-- +-- Policy expression handling +-- +BEGIN; +CREATE TABLE t (c) AS VALUES ('bar'::text); +CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allowed in policy expressions +ERROR: aggregate functions are not allowed in policy expressions +ROLLBACK; +-- +-- Non-target relations are only subject to SELECT policies +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE r1 (a int); +SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (23,regress_rls_schema,r1,t) +(1 row) + +CREATE TABLE r2 (a int); +SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (24,regress_rls_schema,r2,t) +(1 row) + +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +GRANT ALL ON r1, r2 TO regress_rls_bob; +CREATE POLICY p1 ON r1 USING (true); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +CREATE POLICY p1 ON r2 FOR SELECT USING (true); +CREATE POLICY p2 ON r2 FOR INSERT WITH CHECK (false); +CREATE POLICY p3 ON r2 FOR UPDATE USING (false); +CREATE POLICY p4 ON r2 FOR DELETE USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM r1; + a +---- + 10 + 20 +(2 rows) + +SELECT * FROM r2; + a +---- + 10 + 20 +(2 rows) + +-- r2 is read-only +INSERT INTO r2 VALUES (2); -- Not allowed +ERROR: new row violates row-level security policy for table "r2" +\pset tuples_only 1 +UPDATE r2 SET a = 2 RETURNING *; -- Updates nothing + +DELETE FROM r2 RETURNING *; -- Deletes nothing + +\pset tuples_only 0 +-- r2 can be used as a non-target relation in DML +INSERT INTO r1 SELECT a + 1 FROM r2 RETURNING *; -- OK + a +---- + 11 + 21 +(2 rows) + +UPDATE r1 SET a = r2.a + 2 FROM r2 WHERE r1.a = r2.a RETURNING *; -- OK +ERROR: new row for relation "_hyper_23_105_chunk" violates check constraint "constraint_105" +DELETE FROM r1 USING r2 WHERE r1.a = r2.a + 2 RETURNING *; -- OK + a | a +---+--- +(0 rows) + +SELECT * FROM r1; + a +---- + 10 + 11 + 20 + 21 +(4 rows) + +SELECT * FROM r2; + a +---- + 10 + 20 +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +DROP TABLE r1; +DROP TABLE r2; +-- +-- FORCE ROW LEVEL SECURITY applies RLS to owners too +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int); +SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null 
constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (25,regress_rls_schema,r1,t) +(1 row) + +INSERT INTO r1 VALUES (10), (20); +CREATE POLICY p1 ON r1 USING (false); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- No error, but no rows +TABLE r1; + a +--- +(0 rows) + +-- RLS error +INSERT INTO r1 VALUES (1); +ERROR: new row violates row-level security policy for table "r1" +-- No error (unable to see any rows to update) +UPDATE r1 SET a = 1; +TABLE r1; + a +--- +(0 rows) + +-- No error (unable to see any rows to delete) +DELETE FROM r1; +TABLE r1; + a +--- +(0 rows) + +SET row_security = off; +-- these all fail, would be affected by RLS +TABLE r1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. +UPDATE r1 SET a = 1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. +DELETE FROM r1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. +DROP TABLE r1; +-- +-- FORCE ROW LEVEL SECURITY does not break RI +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int PRIMARY KEY); +-- r1 is not a hypertable since r1.a is referenced by r2 +CREATE TABLE r2 (a int REFERENCES r1); +SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (26,regress_rls_schema,r2,t) +(1 row) + +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +-- Create policies on r2 which prevent the +-- owner from seeing any rows, but RI should +-- still see them. +CREATE POLICY p1 ON r2 USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r2 FORCE ROW LEVEL SECURITY; +-- Errors due to rows in r2 +DELETE FROM r1; +ERROR: update or delete on table "r1" violates foreign key constraint "113_23_r2_a_fkey" on table "_hyper_26_113_chunk" +DETAIL: Key (a)=(10) is still referenced from table "_hyper_26_113_chunk". +-- Reset r2 to no-RLS +DROP POLICY p1 ON r2; +ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; +ALTER TABLE r2 DISABLE ROW LEVEL SECURITY; +-- clean out r2 for INSERT test below +DELETE FROM r2; +-- Change r1 to not allow rows to be seen +CREATE POLICY p1 ON r1 USING (false); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- No rows seen +TABLE r1; + a +--- +(0 rows) + +-- No error, RI still sees that row exists in r1 +INSERT INTO r2 VALUES (10); +DROP TABLE r2; +DROP TABLE r1; +-- Ensure cascaded DELETE works +CREATE TABLE r1 (a int PRIMARY KEY); +-- r1 is not a hypertable since r1.a is referenced by r2 +CREATE TABLE r2 (a int REFERENCES r1 ON DELETE CASCADE); +SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (27,regress_rls_schema,r2,t) +(1 row) + +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +-- Create policies on r2 which prevent the +-- owner from seeing any rows, but RI should +-- still see them. 
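Before those policies are created below, it may help to spell out why the referential-integrity checks in this block behave the way they do: foreign-key enforcement runs with row-level security bypassed, so rows that FORCE ROW LEVEL SECURITY hides from the owner still block (or, with ON DELETE CASCADE, are still removed by) actions on the referenced table. A minimal plain-table sketch, with hypothetical names parent/child and not part of the test files:

-- Minimal sketch; plain tables, no hypertables. Not part of the regression test.
CREATE TABLE parent (a int PRIMARY KEY);
CREATE TABLE child (a int REFERENCES parent);
INSERT INTO parent VALUES (10);
INSERT INTO child VALUES (10);
CREATE POLICY hide_all ON child USING (false);
ALTER TABLE child ENABLE ROW LEVEL SECURITY;
ALTER TABLE child FORCE ROW LEVEL SECURITY;
TABLE child;         -- 0 rows: even the owner sees nothing
DELETE FROM parent;  -- still fails: the FK check sees the hidden row in child
-- Had the FK been declared ON DELETE CASCADE, the hidden child row would be
-- deleted instead, which is what the cascade tests below demonstrate.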
+CREATE POLICY p1 ON r2 USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r2 FORCE ROW LEVEL SECURITY; +-- Deletes all records from both +DELETE FROM r1; +-- Remove FORCE from r2 +ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; +-- As owner, we now bypass RLS +-- verify no rows in r2 now +TABLE r2; + a +--- +(0 rows) + +DROP TABLE r2; +DROP TABLE r1; +-- Ensure cascaded UPDATE works +CREATE TABLE r1 (a int PRIMARY KEY); +-- r1 is not a hypertable since r1.a is referenced by r2 +CREATE TABLE r2 (a int REFERENCES r1 ON UPDATE CASCADE); +SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (28,regress_rls_schema,r2,t) +(1 row) + +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +-- Create policies on r2 which prevent the +-- owner from seeing any rows, but RI should +-- still see them. +CREATE POLICY p1 ON r2 USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r2 FORCE ROW LEVEL SECURITY; +-- Updates records in both +UPDATE r1 SET a = a+5; +ERROR: new row for relation "_hyper_28_117_chunk" violates check constraint "constraint_117" +DETAIL: Failing row contains (15). +CONTEXT: SQL statement "UPDATE ONLY "_timescaledb_internal"."_hyper_28_117_chunk" SET "a" = $1 WHERE $2 OPERATOR(pg_catalog.=) "a"" +-- Remove FORCE from r2 +ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; +-- As owner, we now bypass RLS +-- verify records in r2 updated +TABLE r2; + a +---- + 10 + 20 +(2 rows) + +DROP TABLE r2; +DROP TABLE r1; +-- +-- Test INSERT+RETURNING applies SELECT policies as +-- WithCheckOptions (meaning an error is thrown) +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int); +SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +------------------------------ + (29,regress_rls_schema,r1,t) +(1 row) + +CREATE POLICY p1 ON r1 FOR SELECT USING (false); +CREATE POLICY p2 ON r1 FOR INSERT WITH CHECK (true); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- Works fine +INSERT INTO r1 VALUES (10), (20); +-- No error, but no rows +TABLE r1; + a +--- +(0 rows) + +SET row_security = off; +-- fail, would be affected by RLS +TABLE r1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. 
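The INSERT+RETURNING test above sets up a table whose owner may insert rows (p2) but may not read them back (p1 is a SELECT policy of false). RETURNING causes the SELECT policy to be applied to the new rows as a WITH CHECK-style option, which is why the plain INSERT succeeds while the RETURNING variant in the statements that follow raises an error. A minimal sketch of the same effect, with a hypothetical table name and not part of the test files:

-- Minimal sketch (hypothetical table wo_returning). Not part of the regression test.
CREATE TABLE wo_returning (a int);
CREATE POLICY sel_none ON wo_returning FOR SELECT USING (false);
CREATE POLICY ins_all  ON wo_returning FOR INSERT WITH CHECK (true);
ALTER TABLE wo_returning ENABLE ROW LEVEL SECURITY;
ALTER TABLE wo_returning FORCE ROW LEVEL SECURITY;
INSERT INTO wo_returning VALUES (1);              -- ok: only the INSERT policy is checked
INSERT INTO wo_returning VALUES (2) RETURNING *;  -- errors: the new row is not visible under the SELECT policy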
+SET row_security = on; +-- Error +INSERT INTO r1 VALUES (10), (20) RETURNING *; +ERROR: new row violates row-level security policy for table "r1" +DROP TABLE r1; +-- +-- Test UPDATE+RETURNING applies SELECT policies as +-- WithCheckOptions (meaning an error is thrown) +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int PRIMARY KEY); +SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>100); + create_hypertable +------------------------------ + (30,regress_rls_schema,r1,t) +(1 row) + +CREATE POLICY p1 ON r1 FOR SELECT USING (a < 20); +CREATE POLICY p2 ON r1 FOR UPDATE USING (a < 20) WITH CHECK (true); +CREATE POLICY p3 ON r1 FOR INSERT WITH CHECK (true); +INSERT INTO r1 VALUES (10); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- Works fine +UPDATE r1 SET a = 30; +-- Show updated rows +ALTER TABLE r1 NO FORCE ROW LEVEL SECURITY; +TABLE r1; + a +---- + 30 +(1 row) + +-- reset value in r1 for test with RETURNING +UPDATE r1 SET a = 10; +-- Verify row reset +TABLE r1; + a +---- + 10 +(1 row) + +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- Error +UPDATE r1 SET a = 30 RETURNING *; +ERROR: new row violates row-level security policy for table "r1" +-- UPDATE path of INSERT ... ON CONFLICT DO UPDATE should also error out +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30 RETURNING *; +ERROR: new row violates row-level security policy for table "r1" +-- Should still error out without RETURNING (use of arbiter always requires +-- SELECT permissions) +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30; +ERROR: new row violates row-level security policy for table "r1" +-- ON CONFLICT ON CONSTRAINT +INSERT INTO r1 VALUES (10) + ON CONFLICT ON CONSTRAINT r1_pkey DO UPDATE SET a = 30; +ERROR: new row violates row-level security policy for table "r1" +DROP TABLE r1; +-- Check dependency handling +RESET SESSION AUTHORIZATION; +CREATE TABLE dep1 (c1 int); +SELECT public.create_hypertable('dep1', 'c1', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "c1" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +-------------------------------- + (31,regress_rls_schema,dep1,t) +(1 row) + +CREATE TABLE dep2 (c1 int); +SELECT public.create_hypertable('dep2', 'c1', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "c1" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +-------------------------------- + (32,regress_rls_schema,dep2,t) +(1 row) + +CREATE POLICY dep_p1 ON dep1 TO regress_rls_bob USING (c1 > (select max(dep2.c1) from dep2)); +ALTER POLICY dep_p1 ON dep1 TO regress_rls_bob,regress_rls_carol; +-- Should return one +SELECT count(*) = 1 FROM pg_depend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2'); + ?column? +---------- + t +(1 row) + +ALTER POLICY dep_p1 ON dep1 USING (true); +-- Should return one +SELECT count(*) = 1 FROM pg_shdepend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_bob'); + ?column? +---------- + t +(1 row) + +-- Should return one +SELECT count(*) = 1 FROM pg_shdepend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_carol'); + ?column? 
+---------- + t +(1 row) + +-- Should return zero +SELECT count(*) = 0 FROM pg_depend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2'); + ?column? +---------- + t +(1 row) + +-- DROP OWNED BY testing +RESET SESSION AUTHORIZATION; +CREATE ROLE regress_rls_dob_role1; +CREATE ROLE regress_rls_dob_role2; +CREATE TABLE dob_t1 (c1 int); +SELECT public.create_hypertable('dob_t1', 'c1', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "c1" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +---------------------------------- + (33,regress_rls_schema,dob_t1,t) +(1 row) + +CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1); +CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t1; -- should fail, already gone +ERROR: policy "p1" for table "dob_t1" does not exist +CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t1; -- should succeed +CREATE POLICY p1 ON dob_t2 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t2; -- should succeed +DROP USER regress_rls_dob_role1; +DROP USER regress_rls_dob_role2; +-- +-- Clean up objects +-- +RESET SESSION AUTHORIZATION; +\set VERBOSITY terse \\ -- suppress cascade details +DROP SCHEMA regress_rls_schema CASCADE; +NOTICE: drop cascades to 116 other objects +\set VERBOSITY default +DROP USER regress_rls_alice; +DROP USER regress_rls_bob; +DROP USER regress_rls_carol; +DROP USER regress_rls_dave; +DROP USER regress_rls_exempt_user; +DROP ROLE regress_rls_group1; +DROP ROLE regress_rls_group2; +-- Arrange to have a few policies left over, for testing +-- pg_dump/pg_restore +CREATE SCHEMA regress_rls_schema; +CREATE TABLE rls_tbl (c1 int); +SELECT public.create_hypertable('rls_tbl', 'c1', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "c1" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------------- + (34,regress_rls_schema,rls_tbl,t) +(1 row) + +ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY p1 ON rls_tbl USING (c1 > 5); +CREATE POLICY p2 ON rls_tbl FOR SELECT USING (c1 <= 3); +CREATE POLICY p3 ON rls_tbl FOR UPDATE USING (c1 <= 3) WITH CHECK (c1 > 5); +CREATE POLICY p4 ON rls_tbl FOR DELETE USING (c1 <= 3); +CREATE TABLE rls_tbl_force (c1 int); +SELECT public.create_hypertable('rls_tbl_force', 'c1', chunk_time_interval=>2); +NOTICE: adding not-null constraint to column "c1" +DETAIL: Dimensions cannot have NULL values. + create_hypertable +----------------------------------------- + (35,regress_rls_schema,rls_tbl_force,t) +(1 row) + +ALTER TABLE rls_tbl_force ENABLE ROW LEVEL SECURITY; +ALTER TABLE rls_tbl_force FORCE ROW LEVEL SECURITY; +CREATE POLICY p1 ON rls_tbl_force USING (c1 = 5) WITH CHECK (c1 < 5); +CREATE POLICY p2 ON rls_tbl_force FOR SELECT USING (c1 = 8); +CREATE POLICY p3 ON rls_tbl_force FOR UPDATE USING (c1 = 8) WITH CHECK (c1 >= 5); +CREATE POLICY p4 ON rls_tbl_force FOR DELETE USING (c1 = 8); diff --git a/test/expected/update-16.out b/test/expected/update-16.out new file mode 100644 index 00000000000..4c15d6a45d5 --- /dev/null +++ b/test/expected/update-16.out @@ -0,0 +1,164 @@ +-- This file and its contents are licensed under the Apache License 2.0. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\o /dev/null +\ir include/insert_single.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE PUBLIC."one_Partition" ( + "timeCustom" BIGINT NOT NULL, + device_id TEXT NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL, + series_bool BOOLEAN NULL +); +CREATE INDEX ON PUBLIC."one_Partition" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL; +CREATE INDEX ON PUBLIC."one_Partition" ("timeCustom" DESC NULLS LAST, series_0) WHERE series_0 IS NOT NULL; +CREATE INDEX ON PUBLIC."one_Partition" ("timeCustom" DESC NULLS LAST, series_1) WHERE series_1 IS NOT NULL; +CREATE INDEX ON PUBLIC."one_Partition" ("timeCustom" DESC NULLS LAST, series_2) WHERE series_2 IS NOT NULL; +CREATE INDEX ON PUBLIC."one_Partition" ("timeCustom" DESC NULLS LAST, series_bool) WHERE series_bool IS NOT NULL; +\c :DBNAME :ROLE_SUPERUSER +CREATE SCHEMA "one_Partition" AUTHORIZATION :ROLE_DEFAULT_PERM_USER; +\c :DBNAME :ROLE_DEFAULT_PERM_USER; +SELECT * FROM create_hypertable('"public"."one_Partition"', 'timeCustom', associated_schema_name=>'one_Partition', chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); +--output command tags +\set QUIET off +BEGIN; +\COPY "one_Partition" FROM 'data/ds1_dev1_1.tsv' NULL AS ''; +COMMIT; +INSERT INTO "one_Partition"("timeCustom", device_id, series_0, series_1) VALUES +(1257987600000000000, 'dev1', 1.5, 1), +(1257987600000000000, 'dev1', 1.5, 2), +(1257894000000000000, 'dev2', 1.5, 1), +(1257894002000000000, 'dev1', 2.5, 3); +INSERT INTO "one_Partition"("timeCustom", device_id, series_0, series_1) VALUES +(1257894000000000000, 'dev2', 1.5, 2); +\set QUIET on +\o +-- Make sure UPDATE isn't optimized if it includes Append plans +-- Need to turn of nestloop to make append appear the same on PG96 and PG10 +set enable_nestloop = 'off'; +CREATE OR REPLACE FUNCTION series_val() +RETURNS integer LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN 5; +END; +$BODY$; +-- ConstraintAwareAppend applied for SELECT +EXPLAIN (costs off) +SELECT FROM "one_Partition" +WHERE series_1 IN (SELECT series_1 FROM "one_Partition" WHERE series_1 > series_val()); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Hash Join + Hash Cond: ("one_Partition".series_1 = "one_Partition_1".series_1) + -> Custom Scan (ChunkAppend) on "one_Partition" + Chunks excluded during startup: 0 + -> Index Only Scan using "_hyper_1_1_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_1_chunk + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_2_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_2_chunk + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_3_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_3_chunk + Index Cond: (series_1 > (series_val())::double precision) + -> Hash + -> HashAggregate + Group Key: "one_Partition_1".series_1 + -> Custom Scan (ChunkAppend) on "one_Partition" "one_Partition_1" + Chunks excluded during startup: 0 + -> Index Only Scan using "_hyper_1_1_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_1_chunk _hyper_1_1_chunk_1 + Index 
Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_2_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_2_chunk _hyper_1_2_chunk_1 + Index Cond: (series_1 > (series_val())::double precision) + -> Index Only Scan using "_hyper_1_3_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_3_chunk _hyper_1_3_chunk_1 + Index Cond: (series_1 > (series_val())::double precision) +(21 rows) + +-- ConstraintAwareAppend NOT applied for UPDATE +EXPLAIN (costs off) +UPDATE "one_Partition" +SET series_1 = 8 +WHERE series_1 IN (SELECT series_1 FROM "one_Partition" WHERE series_1 > series_val()); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Update on "one_Partition" + Update on _hyper_1_1_chunk "one_Partition_2" + Update on _hyper_1_2_chunk "one_Partition_3" + Update on _hyper_1_3_chunk "one_Partition_4" + -> Hash Join + Hash Cond: ("one_Partition".series_1 = "one_Partition_1".series_1) + -> Append + -> Seq Scan on _hyper_1_1_chunk "one_Partition_2" + -> Seq Scan on _hyper_1_2_chunk "one_Partition_3" + -> Seq Scan on _hyper_1_3_chunk "one_Partition_4" + -> Hash + -> HashAggregate + Group Key: "one_Partition_1".series_1 + -> Append + -> Index Scan using "_hyper_1_1_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_1_chunk "one_Partition_5" + Index Cond: (series_1 > (series_val())::double precision) + -> Index Scan using "_hyper_1_2_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_2_chunk "one_Partition_6" + Index Cond: (series_1 > (series_val())::double precision) + -> Index Scan using "_hyper_1_3_chunk_one_Partition_timeCustom_series_1_idx" on _hyper_1_3_chunk "one_Partition_7" + Index Cond: (series_1 > (series_val())::double precision) +(21 rows) + +SELECT * FROM "one_Partition" ORDER BY "timeCustom", device_id, series_0, series_1, series_2; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000000000 | dev1 | 1.5 | 1 | 2 | t + 1257894000000000000 | dev1 | 1.5 | 2 | | + 1257894000000000000 | dev2 | 1.5 | 1 | | + 1257894000000000000 | dev2 | 1.5 | 2 | | + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 6 | | t + 1257894002000000000 | dev1 | 5.5 | 7 | | f + 1257897600000000000 | dev1 | 4.5 | 5 | | f + 1257987600000000000 | dev1 | 1.5 | 1 | | + 1257987600000000000 | dev1 | 1.5 | 2 | | +(12 rows) + +UPDATE "one_Partition" +SET series_1 = 8 +WHERE series_1 IN (SELECT series_1 FROM "one_Partition" WHERE series_1 > series_val()); +SELECT * FROM "one_Partition" ORDER BY "timeCustom", device_id, series_0, series_1, series_2; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000000000 | dev1 | 1.5 | 1 | 2 | t + 1257894000000000000 | dev1 | 1.5 | 2 | | + 1257894000000000000 | dev2 | 1.5 | 1 | | + 1257894000000000000 | dev2 | 1.5 | 2 | | + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 8 | | f + 1257894002000000000 | dev1 | 5.5 | 8 | | t + 1257897600000000000 | dev1 | 4.5 | 5 | | f + 1257987600000000000 | dev1 | 1.5 | 1 | | + 1257987600000000000 | 
dev1 | 1.5 | 2 | | +(12 rows) + +UPDATE "one_Partition" SET series_1 = 47; +UPDATE "one_Partition" SET series_bool = true; +SELECT * FROM "one_Partition" ORDER BY "timeCustom", device_id, series_0, series_1, series_2; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000000000 | dev1 | 1.5 | 47 | 2 | t + 1257894000000000000 | dev1 | 1.5 | 47 | | t + 1257894000000000000 | dev2 | 1.5 | 47 | | t + 1257894000000000000 | dev2 | 1.5 | 47 | | t + 1257894000000001000 | dev1 | 2.5 | 47 | | t + 1257894001000000000 | dev1 | 3.5 | 47 | | t + 1257894002000000000 | dev1 | 2.5 | 47 | | t + 1257894002000000000 | dev1 | 5.5 | 47 | | t + 1257894002000000000 | dev1 | 5.5 | 47 | | t + 1257897600000000000 | dev1 | 4.5 | 47 | | t + 1257987600000000000 | dev1 | 1.5 | 47 | | t + 1257987600000000000 | dev1 | 1.5 | 47 | | t +(12 rows) + diff --git a/test/t/001_extension.pl b/test/t/001_extension.pl index 7a95720b1bf..a895193340f 100644 --- a/test/t/001_extension.pl +++ b/test/t/001_extension.pl @@ -20,8 +20,17 @@ my $in = ''; my $out = ''; my $timer = IPC::Run::timeout(180); -my $h = - $node->background_psql('postgres', \$in, \$out, $timer, on_error_stop => 0); +my $h; + +if ($ENV{PG_VERSION_MAJOR} >= 16) +{ + $h = $node->background_psql('postgres', on_error_stop => 0); +} +else +{ + $h = $node->background_psql('postgres', \$in, \$out, $timer, + on_error_stop => 0); +} sub check_extension_state { @@ -80,9 +89,17 @@ sub check_extension_state "state is \"created\" after extension is created in other backend"); # Quit the interactive psql session -$in .= q{ - \q -}; +if ($ENV{PG_VERSION_MAJOR} >= 16) +{ + $h->quit or die "psql returned $?"; +} +else +{ + $in .= q{ + \q + }; + + $h->finish or die "psql returned $?"; +} -$h->finish or die "psql returned $?"; $node->stop; diff --git a/tsl/src/continuous_aggs/finalize.c b/tsl/src/continuous_aggs/finalize.c index abc9e44bb52..9fe1854e132 100644 --- a/tsl/src/continuous_aggs/finalize.c +++ b/tsl/src/continuous_aggs/finalize.c @@ -616,6 +616,9 @@ finalizequery_get_select_query(FinalizeQueryInfo *inp, List *matcollist, RTEPermissionInfo *perminfo; #endif + CAGG_MAKEQUERY(final_selquery, inp->final_userquery); + final_selquery->hasAggs = !inp->finalized; + /* * For initial cagg creation rtable will have only 1 entry, * for alter table rtable will have multiple entries with our @@ -634,6 +637,7 @@ finalizequery_get_select_query(FinalizeQueryInfo *inp, List *matcollist, rte->inh = true; rte->rellockmode = 1; rte->eref = copyObject(rte->alias); + rte->relid = mattbladdress->objectId; #if PG16_GE perminfo = addRTEPermissionInfo(&final_selquery->rteperminfos, rte); perminfo->selectedCols = NULL; @@ -735,8 +739,6 @@ finalizequery_get_select_query(FinalizeQueryInfo *inp, List *matcollist, } } - CAGG_MAKEQUERY(final_selquery, inp->final_userquery); - final_selquery->hasAggs = !inp->finalized; if (list_length(inp->final_userquery->jointree->fromlist) >= CONTINUOUS_AGG_MAX_JOIN_RELATIONS || !IsA(linitial(inp->final_userquery->jointree->fromlist), RangeTblRef)) diff --git a/tsl/src/fdw/modify_plan.c b/tsl/src/fdw/modify_plan.c index ad86638721c..5a9dc8fafd2 100644 --- a/tsl/src/fdw/modify_plan.c +++ b/tsl/src/fdw/modify_plan.c @@ -192,7 +192,7 @@ fdw_plan_foreign_modify(PlannerInfo *root, ModifyTable *plan, Index result_relat #if PG16_LT Bitmapset *updatedCols = rte->updatedCols; #else - Bitmapset *updatedCols; + Bitmapset *updatedCols = NULL; if (rte->perminfoindex > 0) { 
RTEPermissionInfo *perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte); diff --git a/tsl/test/expected/cagg_bgw.out b/tsl/test/expected/cagg_bgw-13.out similarity index 100% rename from tsl/test/expected/cagg_bgw.out rename to tsl/test/expected/cagg_bgw-13.out diff --git a/tsl/test/expected/cagg_bgw-14.out b/tsl/test/expected/cagg_bgw-14.out new file mode 100644 index 00000000000..0b16ab36fc4 --- /dev/null +++ b/tsl/test/expected/cagg_bgw-14.out @@ -0,0 +1,702 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set IS_DISTRIBUTED FALSE +\ir include/cagg_bgw_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- +-- Setup +-- +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_wait_for_scheduler_finish() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_destroy() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_reset_time(set_time BIGINT = 0, wait BOOLEAN = false) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +--test that this all works under the community license +ALTER DATABASE :TEST_DBNAME SET timescaledb.license_key='Community'; +--create a function with no permissions to execute +CREATE FUNCTION get_constant_no_perms() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant_no_perms() FROM PUBLIC; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +CREATE OR REPLACE FUNCTION ts_bgw_params_mock_wait_returns_immediately(new_val INTEGER) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +-- Remove any default jobs, e.g., telemetry +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT msg_no, + mock_time, + application_name, + regexp_replace(regexp_replace(msg, '(Wait until|started at|execution time) [0-9]+(\.[0-9]+)?', '\1 (RANDOM)', 'g'), 'background worker "[^"]+"','connection') AS msg + FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + ts_bgw_params_create +---------------------- + +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone 
+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +SELECT * FROM timescaledb_information.job_stats; + hypertable_schema | hypertable_name | job_id | last_run_started_at | last_successful_finish | last_run_status | job_status | last_run_duration | next_start | total_runs | total_successes | total_failures +-------------------+-----------------+--------+---------------------+------------------------+-----------------+------------+-------------------+------------+------------+-----------------+---------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- +(0 rows) + +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +psql:include/cagg_bgw_common.sql:76: WARNING: no privileges were granted for "public" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE test_continuous_agg_table(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10); +psql:include/cagg_bgw_common.sql:82: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------------- + (1,public,test_continuous_agg_table,t) +(1 row) + +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', NULL, 4::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +SELECT id as raw_table_id FROM _timescaledb_catalog.hypertable WHERE table_name='test_continuous_agg_table' \gset +-- min distance from end should be 1 +SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width +FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | user_view_schema | user_view_name | bucket_width +-------------------+------------------+--------------------------+-------------- + 2 | public | test_continuous_agg_view | 2 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT 
id AS job_id FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id \gset +-- job was created +SELECT * FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- create 10 time buckets +INSERT INTO test_continuous_agg_table + SELECT i, i FROM + (SELECT generate_series(0, 10) as i) AS j; +-- no stats +SELECT job_id, next_start, last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + ORDER BY job_id; + job_id | next_start | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------+------------------+------------+-----------------+----------------+--------------- +(0 rows) + +-- no data in view +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- +(0 rows) + +-- run first time +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" + 2 | 0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2" + 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms +(6 rows) + +SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone 
+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- job ran once, successfully +SELECT job_id, next_start-last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1000 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--clear log for next run of scheduler. +TRUNCATE public.bgw_log; +CREATE FUNCTION wait_for_timer_to_run(started_at INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; + message TEXT; +BEGIN + select format('[TESTING] Wait until %%, started at %s', started_at) into message; + FOR i in 1..spins + LOOP + SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs; + if (num_runs > 0) THEN + RETURN true; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO num_runs; + if (num_runs = expected_runs) THEN + RETURN true; + ELSEIF (num_runs > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +--make sure there is 1 job to start with +SELECT wait_for_job_to_run(:job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_FOR_OTHER_TO_ADVANCE); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--start the scheduler on 0 time +SELECT ts_bgw_params_reset_time(0, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run(extract(epoch from interval '24 hour')::int * 1000, 0); + ts_bgw_db_scheduler_test_run +------------------------------ + +(1 row) + +SELECT wait_for_timer_to_run(0); + wait_for_timer_to_run +----------------------- + t +(1 row) + +--advance to 12:00 so that it runs one more time; now we know the +--scheduler has loaded up the job with the old schedule_interval +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock 1us to make the scheduler realize the job is done +SELECT 
ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+1, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +--alter the refresh interval and check if next_start is altered +SELECT alter_job(:job_id, schedule_interval => '1m', retry_period => '1m'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:01:00 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT job_id, next_start - last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id;; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 2 +(1 row) + +--advance to 12:02, job should have run at 12:01 +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 3); + wait_for_job_to_run +--------------------- + t +(1 row) + +--next run in 1 minute +SELECT job_id, next_start-last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 3 +(1 row) + +--change next run to be after 30s instead +SELECT (next_start - '30s'::interval) AS "NEW_NEXT_START" +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id \gset +SELECT alter_job(:job_id, next_start => :'NEW_NEXT_START'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:02:30 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute 30 seconds')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 4); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +TRUNCATE public.bgw_log; +-- data before 8 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 +(3 rows) + +-- invalidations test by running job multiple times +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +DROP MATERIALIZED 
VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:234: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_3_chunk +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1001 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job WHERE hypertable_id=:mat_hypertable_id \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(6 rows) + +-- job ran once, successfully +SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+----------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours ago | t | 1 | 1 | 0 | 0 +(1 row) + +-- should have refreshed everything we have so far +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 13 + 8 | 17 + 10 | 10 +(6 rows) + +-- invalidate some data +UPDATE test_continuous_agg_table +SET data = 11 WHERE time = 6; +--advance time by 12h so that job runs one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait 
until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(12 rows) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours | t | 2 | 2 | 0 | 0 +(1 row) + +-- should have updated data for time=6 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 18 + 8 | 17 + 10 | 10 +(6 rows) + +\x on +--check the information views -- +select view_name, view_owner, materialization_hypertable_schema, materialization_hypertable_name +from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---------------------+--------------------------- +view_name | test_continuous_agg_view +view_owner | default_perm_user +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_3 + +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---+------------------------------------------------------------------------- +view_name | test_continuous_agg_view +view_definition | SELECT time_bucket(2, test_continuous_agg_table."time") AS time_bucket,+ + | sum(test_continuous_agg_table.data) AS value + + | FROM test_continuous_agg_table + + | GROUP BY (time_bucket(2, test_continuous_agg_table."time")); + +select job_status, last_run_duration +from timescaledb_information.job_stats ps, timescaledb_information.continuous_aggregates cagg +where cagg.view_name::text like '%test_continuous_agg_view' +and cagg.materialization_hypertable_name = ps.hypertable_name; +-[ RECORD 1 ]-----+---------- +job_status | Scheduled +last_run_duration | + +\x off +DROP MATERIALIZED VIEW test_continuous_agg_view; 
+psql:include/cagg_bgw_common.sql:294: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_4_chunk +--create a view with a function that it has no permission to execute +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value, get_constant_no_perms() + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +-- job fails +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1002 | f | 1 | 0 | 1 | 0 +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--clear log for next run of the scheduler +TRUNCATE public.bgw_log; +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +-- +-- Test creating continuous aggregate with a user that is the non-owner of the raw table +-- +CREATE TABLE test_continuous_agg_table_w_grant(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10); +psql:include/cagg_bgw_common.sql:332: NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------------------------ + (5,public,test_continuous_agg_table_w_grant,t) +(1 row) + +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table_w_grant', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON test_continuous_agg_table_w_grant TO public; +INSERT INTO test_continuous_agg_table_w_grant + SELECT 1 , 1; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +-- make sure view can be created +CREATE MATERIALIZED VIEW 
test_continuous_agg_view_user_2 + WITH ( timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table_w_grant + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view_user_2', NULL, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT id, owner FROM _timescaledb_config.bgw_job WHERE id = :job_id ; + id | owner +------+--------------------- + 1003 | default_perm_user_2 +(1 row) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1003 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--view is populated +SELECT * FROM test_continuous_agg_view_user_2 ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke permissions from the continuous agg view owner to select from raw table +--no further updates to cont agg should happen +REVOKE SELECT ON test_continuous_agg_table_w_grant FROM public; +--add new data to table +INSERT INTO test_continuous_agg_table_w_grant VALUES(5,1); +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--advance time by 12h so that job tries to run one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +--should show a failing execution because no longer has permissions (due to lack of permission on partial view owner's part) +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1003 | f | 2 | 1 | 1 | 0 +(1 row) + +--view was NOT updated; but the old stuff is still there +SELECT * FROM test_continuous_agg_view_user_2; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * from sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] + 1 | 0 
| Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" + 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" + 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | job 1003 threw an error + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant +(10 rows) + +-- Count the number of continuous aggregate policies +SELECT count(*) FROM _timescaledb_config.bgw_job + WHERE proc_schema = '_timescaledb_functions' + AND proc_name = 'policy_refresh_continuous_aggregate'; + count +------- + 1 +(1 row) + diff --git a/tsl/test/expected/cagg_bgw-15.out b/tsl/test/expected/cagg_bgw-15.out new file mode 100644 index 00000000000..0b16ab36fc4 --- /dev/null +++ b/tsl/test/expected/cagg_bgw-15.out @@ -0,0 +1,702 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set IS_DISTRIBUTED FALSE +\ir include/cagg_bgw_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- +-- Setup +-- +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_wait_for_scheduler_finish() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_destroy() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_reset_time(set_time BIGINT = 0, wait BOOLEAN = false) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +--test that this all works under the community license +ALTER DATABASE :TEST_DBNAME SET timescaledb.license_key='Community'; +--create a function with no permissions to execute +CREATE FUNCTION get_constant_no_perms() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant_no_perms() FROM PUBLIC; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +CREATE OR REPLACE FUNCTION ts_bgw_params_mock_wait_returns_immediately(new_val INTEGER) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +-- Remove any default jobs, e.g., telemetry +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT msg_no, + 
mock_time, + application_name, + regexp_replace(regexp_replace(msg, '(Wait until|started at|execution time) [0-9]+(\.[0-9]+)?', '\1 (RANDOM)', 'g'), 'background worker "[^"]+"','connection') AS msg + FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + ts_bgw_params_create +---------------------- + +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +SELECT * FROM timescaledb_information.job_stats; + hypertable_schema | hypertable_name | job_id | last_run_started_at | last_successful_finish | last_run_status | job_status | last_run_duration | next_start | total_runs | total_successes | total_failures +-------------------+-----------------+--------+---------------------+------------------------+-----------------+------------+-------------------+------------+------------+-----------------+---------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- +(0 rows) + +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +psql:include/cagg_bgw_common.sql:76: WARNING: no privileges were granted for "public" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE test_continuous_agg_table(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10); +psql:include/cagg_bgw_common.sql:82: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------------- + (1,public,test_continuous_agg_table,t) +(1 row) + +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO 
DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', NULL, 4::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +SELECT id as raw_table_id FROM _timescaledb_catalog.hypertable WHERE table_name='test_continuous_agg_table' \gset +-- min distance from end should be 1 +SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width +FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | user_view_schema | user_view_name | bucket_width +-------------------+------------------+--------------------------+-------------- + 2 | public | test_continuous_agg_view | 2 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id \gset +-- job was created +SELECT * FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- create 10 time buckets +INSERT INTO test_continuous_agg_table + SELECT i, i FROM + (SELECT generate_series(0, 10) as i) AS j; +-- no stats +SELECT job_id, next_start, last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + ORDER BY job_id; + job_id | next_start | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------+------------------+------------+-----------------+----------------+--------------- +(0 rows) + +-- no data in view +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- +(0 rows) + +-- run first time +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" + 2 | 
0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2" + 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms +(6 rows) + +SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- job ran once, successfully +SELECT job_id, next_start-last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1000 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--clear log for next run of scheduler. 
+TRUNCATE public.bgw_log; +CREATE FUNCTION wait_for_timer_to_run(started_at INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; + message TEXT; +BEGIN + select format('[TESTING] Wait until %%, started at %s', started_at) into message; + FOR i in 1..spins + LOOP + SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs; + if (num_runs > 0) THEN + RETURN true; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO num_runs; + if (num_runs = expected_runs) THEN + RETURN true; + ELSEIF (num_runs > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +--make sure there is 1 job to start with +SELECT wait_for_job_to_run(:job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_FOR_OTHER_TO_ADVANCE); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--start the scheduler on 0 time +SELECT ts_bgw_params_reset_time(0, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run(extract(epoch from interval '24 hour')::int * 1000, 0); + ts_bgw_db_scheduler_test_run +------------------------------ + +(1 row) + +SELECT wait_for_timer_to_run(0); + wait_for_timer_to_run +----------------------- + t +(1 row) + +--advance to 12:00 so that it runs one more time; now we know the +--scheduler has loaded up the job with the old schedule_interval +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock 1us to make the scheduler realize the job is done +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+1, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +--alter the refresh interval and check if next_start is altered +SELECT alter_job(:job_id, schedule_interval => '1m', retry_period => '1m'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:01:00 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT job_id, next_start - last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id;; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 2 +(1 row) + +--advance to 12:02, job should have run at 12:01 +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT 
wait_for_job_to_run(:job_id, 3); + wait_for_job_to_run +--------------------- + t +(1 row) + +--next run in 1 minute +SELECT job_id, next_start-last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 3 +(1 row) + +--change next run to be after 30s instead +SELECT (next_start - '30s'::interval) AS "NEW_NEXT_START" +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id \gset +SELECT alter_job(:job_id, next_start => :'NEW_NEXT_START'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:02:30 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute 30 seconds')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 4); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +TRUNCATE public.bgw_log; +-- data before 8 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 +(3 rows) + +-- invalidations test by running job multiple times +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:234: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_3_chunk +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1001 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job WHERE hypertable_id=:mat_hypertable_id \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 
1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(6 rows) + +-- job ran once, successfully +SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+----------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours ago | t | 1 | 1 | 0 | 0 +(1 row) + +-- should have refreshed everything we have so far +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 13 + 8 | 17 + 10 | 10 +(6 rows) + +-- invalidate some data +UPDATE test_continuous_agg_table +SET data = 11 WHERE time = 6; +--advance time by 12h so that job runs one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 43200000000 
| Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(12 rows) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours | t | 2 | 2 | 0 | 0 +(1 row) + +-- should have updated data for time=6 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 18 + 8 | 17 + 10 | 10 +(6 rows) + +\x on +--check the information views -- +select view_name, view_owner, materialization_hypertable_schema, materialization_hypertable_name +from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---------------------+--------------------------- +view_name | test_continuous_agg_view +view_owner | default_perm_user +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_3 + +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---+------------------------------------------------------------------------- +view_name | test_continuous_agg_view +view_definition | SELECT time_bucket(2, test_continuous_agg_table."time") AS time_bucket,+ + | sum(test_continuous_agg_table.data) AS value + + | FROM test_continuous_agg_table + + | GROUP BY (time_bucket(2, test_continuous_agg_table."time")); + +select job_status, last_run_duration +from timescaledb_information.job_stats ps, timescaledb_information.continuous_aggregates cagg +where cagg.view_name::text like '%test_continuous_agg_view' +and cagg.materialization_hypertable_name = ps.hypertable_name; +-[ RECORD 1 ]-----+---------- +job_status | Scheduled +last_run_duration | + +\x off +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:294: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_4_chunk +--create a view with a function that it has no permission to execute +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value, get_constant_no_perms() + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +-- job fails +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1002 | f | 1 | 0 
| 1 | 0 +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--clear log for next run of the scheduler +TRUNCATE public.bgw_log; +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +-- +-- Test creating continuous aggregate with a user that is the non-owner of the raw table +-- +CREATE TABLE test_continuous_agg_table_w_grant(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10); +psql:include/cagg_bgw_common.sql:332: NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------------------------ + (5,public,test_continuous_agg_table_w_grant,t) +(1 row) + +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table_w_grant', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON test_continuous_agg_table_w_grant TO public; +INSERT INTO test_continuous_agg_table_w_grant + SELECT 1 , 1; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +-- make sure view can be created +CREATE MATERIALIZED VIEW test_continuous_agg_view_user_2 + WITH ( timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table_w_grant + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view_user_2', NULL, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT id, owner FROM _timescaledb_config.bgw_job WHERE id = :job_id ; + id | owner +------+--------------------- + 1003 | default_perm_user_2 +(1 row) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1003 | @ 12 hours | t | 1 | 1 | 0 | 0 
+(1 row) + +--view is populated +SELECT * FROM test_continuous_agg_view_user_2 ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke permissions from the continuous agg view owner to select from raw table +--no further updates to cont agg should happen +REVOKE SELECT ON test_continuous_agg_table_w_grant FROM public; +--add new data to table +INSERT INTO test_continuous_agg_table_w_grant VALUES(5,1); +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--advance time by 12h so that job tries to run one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +--should show a failing execution because no longer has permissions (due to lack of permission on partial view owner's part) +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1003 | f | 2 | 1 | 1 | 0 +(1 row) + +--view was NOT updated; but the old stuff is still there +SELECT * FROM test_continuous_agg_view_user_2; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * from sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" + 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" + 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | job 1003 threw an error + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant +(10 rows) + +-- Count the number of continuous aggregate policies +SELECT count(*) FROM _timescaledb_config.bgw_job + WHERE proc_schema = '_timescaledb_functions' + AND proc_name = 'policy_refresh_continuous_aggregate'; + count +------- + 1 +(1 row) + diff --git a/tsl/test/expected/cagg_bgw-16.out b/tsl/test/expected/cagg_bgw-16.out new file mode 100644 index 00000000000..63f8f8cc7bb --- /dev/null +++ b/tsl/test/expected/cagg_bgw-16.out @@ -0,0 +1,702 @@ +-- This file and its contents 
are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set IS_DISTRIBUTED FALSE +\ir include/cagg_bgw_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- +-- Setup +-- +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_wait_for_scheduler_finish() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_destroy() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_reset_time(set_time BIGINT = 0, wait BOOLEAN = false) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +--test that this all works under the community license +ALTER DATABASE :TEST_DBNAME SET timescaledb.license_key='Community'; +--create a function with no permissions to execute +CREATE FUNCTION get_constant_no_perms() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant_no_perms() FROM PUBLIC; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +CREATE OR REPLACE FUNCTION ts_bgw_params_mock_wait_returns_immediately(new_val INTEGER) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +-- Remove any default jobs, e.g., telemetry +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT msg_no, + mock_time, + application_name, + regexp_replace(regexp_replace(msg, '(Wait until|started at|execution time) [0-9]+(\.[0-9]+)?', '\1 (RANDOM)', 'g'), 'background worker "[^"]+"','connection') AS msg + FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + ts_bgw_params_create +---------------------- + +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +SELECT * FROM timescaledb_information.job_stats; + hypertable_schema | hypertable_name | job_id | last_run_started_at | last_successful_finish | last_run_status | job_status | last_run_duration | next_start | total_runs | total_successes | total_failures 
+-------------------+-----------------+--------+---------------------+------------------------+-----------------+------------+-------------------+------------+------------+-----------------+---------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- +(0 rows) + +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +psql:include/cagg_bgw_common.sql:76: WARNING: no privileges were granted for "public" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE test_continuous_agg_table(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10); +psql:include/cagg_bgw_common.sql:82: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------------- + (1,public,test_continuous_agg_table,t) +(1 row) + +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', NULL, 4::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +SELECT id as raw_table_id FROM _timescaledb_catalog.hypertable WHERE table_name='test_continuous_agg_table' \gset +-- min distance from end should be 1 +SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width +FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | user_view_schema | user_view_name | bucket_width +-------------------+------------------+--------------------------+-------------- + 2 | public | test_continuous_agg_view | 2 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id \gset +-- job was created +SELECT * FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone 
+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- create 10 time buckets +INSERT INTO test_continuous_agg_table + SELECT i, i FROM + (SELECT generate_series(0, 10) as i) AS j; +-- no stats +SELECT job_id, next_start, last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + ORDER BY job_id; + job_id | next_start | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------+------------------+------------+-----------------+----------------+--------------- +(0 rows) + +-- no data in view +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- +(0 rows) + +-- run first time +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" + 2 | 0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2" + 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms +(6 rows) + +SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | 
default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- job ran once, successfully +SELECT job_id, next_start-last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1000 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--clear log for next run of scheduler. +TRUNCATE public.bgw_log; +CREATE FUNCTION wait_for_timer_to_run(started_at INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; + message TEXT; +BEGIN + select format('[TESTING] Wait until %%, started at %s', started_at) into message; + FOR i in 1..spins + LOOP + SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs; + if (num_runs > 0) THEN + RETURN true; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO num_runs; + if (num_runs = expected_runs) THEN + RETURN true; + ELSEIF (num_runs > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +--make sure there is 1 job to start with +SELECT wait_for_job_to_run(:job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_FOR_OTHER_TO_ADVANCE); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--start the scheduler on 0 time +SELECT ts_bgw_params_reset_time(0, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run(extract(epoch from interval '24 hour')::int * 1000, 0); + ts_bgw_db_scheduler_test_run +------------------------------ + +(1 row) + +SELECT wait_for_timer_to_run(0); + wait_for_timer_to_run +----------------------- + t +(1 row) + +--advance to 12:00 so that it runs one more time; now we know the +--scheduler has loaded up the job with the old schedule_interval +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock 1us to make the scheduler realize the job is done +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+1, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +--alter the refresh interval and check if next_start is altered +SELECT alter_job(:job_id, schedule_interval => '1m', retry_period => '1m'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 
min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:01:00 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT job_id, next_start - last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id;; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 2 +(1 row) + +--advance to 12:02, job should have run at 12:01 +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 3); + wait_for_job_to_run +--------------------- + t +(1 row) + +--next run in 1 minute +SELECT job_id, next_start-last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 3 +(1 row) + +--change next run to be after 30s instead +SELECT (next_start - '30s'::interval) AS "NEW_NEXT_START" +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id \gset +SELECT alter_job(:job_id, next_start => :'NEW_NEXT_START'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:02:30 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute 30 seconds')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 4); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +TRUNCATE public.bgw_log; +-- data before 8 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 +(3 rows) + +-- invalidations test by running job multiple times +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:234: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_3_chunk +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 
1001 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job WHERE hypertable_id=:mat_hypertable_id \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(6 rows) + +-- job ran once, successfully +SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+----------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours ago | t | 1 | 1 | 0 | 0 +(1 row) + +-- should have refreshed everything we have so far +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 13 + 8 | 17 + 10 | 10 +(6 rows) + +-- invalidate some data +UPDATE test_continuous_agg_table +SET data = 11 WHERE time = 6; +--advance time by 12h so that job runs one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous 
Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(12 rows) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours | t | 2 | 2 | 0 | 0 +(1 row) + +-- should have updated data for time=6 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 18 + 8 | 17 + 10 | 10 +(6 rows) + +\x on +--check the information views -- +select view_name, view_owner, materialization_hypertable_schema, materialization_hypertable_name +from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---------------------+--------------------------- +view_name | test_continuous_agg_view +view_owner | default_perm_user +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_3 + +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---+----------------------------------------------- +view_name | test_continuous_agg_view +view_definition | SELECT time_bucket(2, "time") AS time_bucket,+ + | sum(data) AS value + + | FROM test_continuous_agg_table + + | GROUP BY (time_bucket(2, "time")); + +select job_status, last_run_duration +from timescaledb_information.job_stats ps, timescaledb_information.continuous_aggregates cagg +where cagg.view_name::text like '%test_continuous_agg_view' +and cagg.materialization_hypertable_name = ps.hypertable_name; +-[ RECORD 1 ]-----+---------- +job_status | Scheduled +last_run_duration | + +\x off +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:294: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_4_chunk +--create a view with a function that it has no permission to execute +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value, get_constant_no_perms() + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT id AS job_id FROM 
_timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +-- job fails +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1002 | f | 1 | 0 | 1 | 0 +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--clear log for next run of the scheduler +TRUNCATE public.bgw_log; +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +-- +-- Test creating continuous aggregate with a user that is the non-owner of the raw table +-- +CREATE TABLE test_continuous_agg_table_w_grant(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10); +psql:include/cagg_bgw_common.sql:332: NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------------------------ + (5,public,test_continuous_agg_table_w_grant,t) +(1 row) + +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table_w_grant', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON test_continuous_agg_table_w_grant TO public; +INSERT INTO test_continuous_agg_table_w_grant + SELECT 1 , 1; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +-- make sure view can be created +CREATE MATERIALIZED VIEW test_continuous_agg_view_user_2 + WITH ( timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table_w_grant + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view_user_2', NULL, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish 
+------------------------------------------------------------ + +(1 row) + +SELECT id, owner FROM _timescaledb_config.bgw_job WHERE id = :job_id ; + id | owner +------+--------------------- + 1003 | default_perm_user_2 +(1 row) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1003 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--view is populated +SELECT * FROM test_continuous_agg_view_user_2 ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke permissions from the continuous agg view owner to select from raw table +--no further updates to cont agg should happen +REVOKE SELECT ON test_continuous_agg_table_w_grant FROM public; +--add new data to table +INSERT INTO test_continuous_agg_table_w_grant VALUES(5,1); +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--advance time by 12h so that job tries to run one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +--should show a failing execution because no longer has permissions (due to lack of permission on partial view owner's part) +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1003 | f | 2 | 1 | 1 | 0 +(1 row) + +--view was NOT updated; but the old stuff is still there +SELECT * FROM test_continuous_agg_view_user_2; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * from sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" + 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" + 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh 
Continuous Aggregate Policy [1003] | job 1003 threw an error + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant +(10 rows) + +-- Count the number of continuous aggregate policies +SELECT count(*) FROM _timescaledb_config.bgw_job + WHERE proc_schema = '_timescaledb_functions' + AND proc_name = 'policy_refresh_continuous_aggregate'; + count +------- + 1 +(1 row) + diff --git a/tsl/test/expected/cagg_ddl.out b/tsl/test/expected/cagg_ddl-13.out similarity index 96% rename from tsl/test/expected/cagg_ddl.out rename to tsl/test/expected/cagg_ddl-13.out index 5a2df6d4e8f..b9514c3eb29 100644 --- a/tsl/test/expected/cagg_ddl.out +++ b/tsl/test/expected/cagg_ddl-13.out @@ -37,6 +37,11 @@ SELECT table_name FROM create_hypertable('conditions', 'timec'); \endif -- schema tests \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- drop if the tablespace1 and/or tablespace2 exists +SET client_min_messages TO error; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +RESET client_min_messages; CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; CREATE SCHEMA rename_schema; @@ -228,7 +233,7 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, -- drop_chunks tests DROP TABLE conditions CASCADE; DROP TABLE foo CASCADE; -psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:166: NOTICE: drop cascades to 2 other objects CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_id @@ -294,7 +299,7 @@ SELECT * FROM drop_chunks_view ORDER BY 1; SELECT drop_chunks(:'drop_chunks_mat_table', newer_than => -20, verbose => true); -psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable +psql:include/cagg_ddl_common.sql:218: ERROR: operation not supported on materialized hypertable \set ON_ERROR_STOP 1 SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; count @@ -318,8 +323,8 @@ SELECT * FROM drop_chunks_view ORDER BY 1; -- drop chunks when the chunksize and time_bucket aren't aligned DROP TABLE drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_u_id @@ -386,7 +391,7 @@ TRUNCATE drop_chunks_table_u; \set ON_ERROR_STOP 0 -- Can't truncate materialized hypertables directly TRUNCATE :drop_chunks_mat_table_u; -psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +psql:include/cagg_ddl_common.sql:276: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate \set ON_ERROR_STOP 1 -- Check that we don't interfere with TRUNCATE of normal table and -- partitioned table @@ -413,31 +418,31 @@ SELECT * FROM truncate_partitioned; \set ON_ERROR_STOP 0 -- test a variety of ALTER TABLE statements ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO 
bad_name; -psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported +psql:include/cagg_ddl_common.sql:296: ERROR: renaming columns on materialization tables is not supported ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); -psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; -psql:include/cagg_ddl_common.sql:293: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; -psql:include/cagg_ddl_common.sql:294: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; -psql:include/cagg_ddl_common.sql:295: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; -psql:include/cagg_ddl_common.sql:296: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; -psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; -psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; -psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:304: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; -psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:305: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; -psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:306: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u NOT OF; -psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:307: ERROR: operation not supported on materialization tables ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; -psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables +psql:include/cagg_ddl_common.sql:308: ERROR: operation not supported on materialization tables \set ON_ERROR_STOP 1 ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; @@ -473,7 +478,7 @@ CREATE MATERIALIZED VIEW new_name_view AS SELECT time_bucket('6', time_bucket), COUNT("count") FROM new_name 
GROUP BY 1 WITH NO DATA; -psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table +psql:include/cagg_ddl_common.sql:331: ERROR: hypertable is a continuous aggregate materialization table \set ON_ERROR_STOP 1 CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); \if :IS_DISTRIBUTED @@ -515,10 +520,10 @@ SELECT * FROM cagg_expr ORDER BY time LIMIT 5; --test materialization of invalidation before drop DROP TABLE IF EXISTS drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping +psql:include/cagg_ddl_common.sql:363: NOTICE: table "drop_chunks_table" does not exist, skipping DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_nid @@ -714,7 +719,7 @@ SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); -psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk +psql:include/cagg_ddl_common.sql:459: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk drop_chunks ------------------------------------------ _timescaledb_internal._hyper_11_17_chunk @@ -735,7 +740,7 @@ WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integ \set ON_ERROR_STOP 0 \set VERBOSITY default SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); -psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable +psql:include/cagg_ddl_common.sql:471: ERROR: operation not supported on materialized hypertable DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. HINT: Try the operation on the continuous aggregate instead. 
\set VERBOSITY terse @@ -988,9 +993,9 @@ SELECT user_view, (2 rows) DROP MATERIALIZED VIEW whatever_view_1; -psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +psql:include/cagg_ddl_common.sql:649: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk DROP MATERIALIZED VIEW whatever_view_2; -psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +psql:include/cagg_ddl_common.sql:650: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk -- test bucket width expressions on integer hypertables CREATE TABLE metrics_int2 ( time int2 NOT NULL, @@ -1101,39 +1106,39 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma SELECT time_bucket(1::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:755: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(1::smallint + 2::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:762: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; -- width expression for int4 hypertables CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(1, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:770: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(1 + 2, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:777: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; -- width expression for int8 hypertables CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(1, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:785: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(1 + 2, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:792: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; \set ON_ERROR_STOP 0 -- non-immutable expresions should be rejected @@ -1141,17 +1146,17 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma SELECT time_bucket(extract(year FROM now())::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:796: 
ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(extract(year FROM now())::int, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(extract(year FROM now())::int, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:811: ERROR: only immutable expressions allowed in time bucket function \set ON_ERROR_STOP 1 -- Test various ALTER MATERIALIZED VIEW statements. SET ROLE :ROLE_DEFAULT_PERM_USER; @@ -1178,7 +1183,7 @@ tablespace | -- we test that the normal checks are done when changing the owner. \set ON_ERROR_STOP 0 ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" +psql:include/cagg_ddl_common.sql:831: ERROR: must be member of role "test_role_1" \set ON_ERROR_STOP 1 -- Superuser can always change owner SET ROLE :ROLE_CLUSTER_SUPERUSER; @@ -1242,9 +1247,9 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:879: NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4; -psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +psql:include/cagg_ddl_common.sql:881: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk -- Case 2: DROP CASCADE should have similar behaviour as DROP CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1252,9 +1257,9 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:889: NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +psql:include/cagg_ddl_common.sql:891: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk -- Case 3: require CASCADE in case of dependent object CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1262,16 +1267,16 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:899: NOTICE: refreshing continuous aggregate "conditionsnm_4" CREATE VIEW see_cagg as select * from conditionsnm_4; \set ON_ERROR_STOP 0 DROP MATERIALIZED VIEW conditionsnm_4; -psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view 
conditionsnm_4 because other objects depend on it +psql:include/cagg_ddl_common.sql:903: ERROR: cannot drop view conditionsnm_4 because other objects depend on it \set ON_ERROR_STOP 1 -- Case 4: DROP CASCADE with dependency DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg -psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk -- Test DROP SCHEMA CASCADE with continuous aggregates -- -- Issue: #2350 @@ -1314,7 +1319,7 @@ WHERE user_view_name = 'telemetry_1s'; \gset DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects +psql:include/cagg_ddl_common.sql:946: NOTICE: drop cascades to 4 other objects SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; count ------- @@ -1398,7 +1403,7 @@ WHERE user_view_name = 'cagg2'; \gset DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects +psql:include/cagg_ddl_common.sql:1003: NOTICE: drop cascades to 7 other objects SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; count ------- @@ -1568,10 +1573,10 @@ CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); \set ON_ERROR_STOP 0 -- unique indexes are not supported CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); -psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes +psql:include/cagg_ddl_common.sql:1089: ERROR: continuous aggregates do not support UNIQUE indexes -- concurrently index creation not supported CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); -psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation +psql:include/cagg_ddl_common.sql:1091: ERROR: hypertables do not support concurrent index creation \set ON_ERROR_STOP 1 CREATE INDEX index_avg ON conditions_daily (avg); CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); @@ -1608,14 +1613,14 @@ CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.m AS SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; -psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +psql:include/cagg_ddl_common.sql:1113: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket FROM i3696 GROUP BY cnt + cnt2, bucket, search_query HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; -psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +psql:include/cagg_ddl_common.sql:1121: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true'); --TEST test with multiple settings on continuous aggregates -- -- test for materialized_only + compress 
combinations (real time aggs enabled initially) @@ -1632,7 +1637,7 @@ SELECT create_hypertable('test_setting', 'time'); \endif CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +psql:include/cagg_ddl_common.sql:1135: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date INSERT INTO test_setting SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); @@ -1646,7 +1651,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket +psql:include/cagg_ddl_common.sql:1146: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1664,7 +1669,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket +psql:include/cagg_ddl_common.sql:1154: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1716,10 +1721,10 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +psql:include/cagg_ddl_common.sql:1179: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" +psql:include/cagg_ddl_common.sql:1182: NOTICE: refreshing continuous aggregate "test_setting_cagg" CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); SELECT count(*) from test_setting_cagg ORDER BY 1; count @@ -1731,7 +1736,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket +psql:include/cagg_ddl_common.sql:1190: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM 
timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1749,7 +1754,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket +psql:include/cagg_ddl_common.sql:1198: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1849,7 +1854,7 @@ SELECT time_bucket ('1 day', time) AS bucket, amount + sum(fiat_value) FROM transactions GROUP BY bucket, amount; -psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" +psql:include/cagg_ddl_common.sql:1272: NOTICE: refreshing continuous aggregate "cashflows" SELECT h.table_name AS "MAT_TABLE_NAME", partial_view_name AS "PART_VIEW_NAME", direct_view_name AS "DIRECT_VIEW_NAME" @@ -2041,8 +2046,8 @@ WHERE d.hypertable_id = ca.mat_hypertable_id; -- Since #6077 CAggs are materialized only by default DROP TABLE conditions CASCADE; -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 2 other objects CREATE TABLE conditions ( time TIMESTAMPTZ NOT NULL, location TEXT NOT NULL, diff --git a/tsl/test/expected/cagg_ddl-14.out b/tsl/test/expected/cagg_ddl-14.out new file mode 100644 index 00000000000..b9514c3eb29 --- /dev/null +++ b/tsl/test/expected/cagg_ddl-14.out @@ -0,0 +1,2164 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set IS_DISTRIBUTED FALSE +\ir include/cagg_ddl_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- Set this variable to avoid using a hard-coded path each time query +-- results are compared +\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +\else +\echo 'Running local hypertable tests' +Running local hypertable tests +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +--DDL commands on continuous aggregates +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL +); +\if :IS_DISTRIBUTED +SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); +\else +SELECT table_name FROM create_hypertable('conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +\endif +-- schema tests +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- drop if the tablespace1 and/or tablespace2 exists +SET client_min_messages TO error; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +RESET client_min_messages; +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +CREATE SCHEMA rename_schema; +GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); +\else +SELECT create_hypertable('foo', 'time'); + create_hypertable +------------------- + (2,public,foo,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW rename_test + WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1week', time), COUNT(data) + FROM foo + GROUP BY 1 WITH NO DATA; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + public | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + rename_schema | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'rename_test' +\gset +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name 
| partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + rename_schema | rename_test | public | _partial_view_3 +(1 row) + +--alter direct view schema +SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | direct_view_schema | direct_view_name +------------------+----------------+-----------------------+------------------ + rename_schema | rename_test | _timescaledb_internal | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA rename_schema RENAME TO new_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; +ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + foo_name_schema | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | foo_name_schema 
| _partial_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA foo_name_schema RENAME TO rename_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | rename_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+--------------------+---------------------+------------------- + public | rename_c_aggregate | rename_schema | _partial_view_3 +(1 row) + +SELECT * FROM rename_c_aggregate; + time_bucket | count +-------------+------- +(0 rows) + +ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 +(1 row) + +--rename direct view +ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view +(1 row) + +-- drop_chunks tests +DROP TABLE conditions CASCADE; +DROP TABLE foo CASCADE; +psql:include/cagg_ddl_common.sql:166: NOTICE: drop cascades to 2 other objects +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_id + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), COUNT(data) + FROM 
drop_chunks_table + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +-- Only refresh up to bucket 15 initially. Matches the old refresh +-- behavior that didn't materialize everything +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table without specifying +-- cont. aggregate view name explicitly +\set ON_ERROR_STOP 0 +SELECT drop_chunks(:'drop_chunks_mat_table', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:218: ERROR: operation not supported on materialized hypertable +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = 
_timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +-- Refresh up to bucket 15 to match old materializer behavior +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +-- TRUNCATE test +-- Can truncate regular hypertables that have caggs +TRUNCATE drop_chunks_table_u; +\set ON_ERROR_STOP 0 +-- Can't truncate materialized hypertables directly +TRUNCATE :drop_chunks_mat_table_u; +psql:include/cagg_ddl_common.sql:276: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +\set ON_ERROR_STOP 1 +-- Check that we don't interfere with TRUNCATE of normal table and +-- partitioned table +CREATE TABLE truncate (value int); +INSERT INTO truncate VALUES (1), (2); +TRUNCATE truncate; +SELECT * FROM truncate; + value +------- +(0 rows) + +CREATE TABLE truncate_partitioned (value int) + PARTITION BY RANGE(value); +CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned + FOR VALUES FROM (1) TO (3); +INSERT INTO truncate_partitioned VALUES (1), (2); +TRUNCATE truncate_partitioned; +SELECT * FROM truncate_partitioned; + value +------- +(0 rows) + +-- ALTER TABLE tests +\set ON_ERROR_STOP 0 +-- test a variety of ALTER TABLE statements +ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; +psql:include/cagg_ddl_common.sql:296: ERROR: renaming columns on materialization tables is not supported +ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); +psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; +psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; +psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; +psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; +psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; +psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; +psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; +psql:include/cagg_ddl_common.sql:304: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; +psql:include/cagg_ddl_common.sql:305: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; +psql:include/cagg_ddl_common.sql:306: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u NOT OF; +psql:include/cagg_ddl_common.sql:307: ERROR: operation not supported on 
materialization tables +ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; +psql:include/cagg_ddl_common.sql:308: ERROR: operation not supported on materialization tables +\set ON_ERROR_STOP 1 +ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; +ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT * FROM new_name; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +\set ON_ERROR_STOP 0 +-- no continuous aggregates on a continuous aggregate materialization table +CREATE MATERIALIZED VIEW new_name_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('6', time_bucket), COUNT("count") + FROM new_name + GROUP BY 1 WITH NO DATA; +psql:include/cagg_ddl_common.sql:331: ERROR: hypertable is a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); +\else +SELECT create_hypertable('metrics','time'); + create_hypertable +---------------------- + (8,public,metrics,t) +(1 row) + +\endif +INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; +-- check expressions in view definition +CREATE MATERIALIZED VIEW cagg_expr + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1d', time) AS time, + 'Const'::text AS Const, + 4.3::numeric AS "numeric", + first(metrics,time), + CASE WHEN true THEN 'foo' ELSE 'bar' END, + COALESCE(NULL,'coalesce'), + avg(v1) + avg(v2) AS avg1, + avg(v1+v2) AS avg2 +FROM metrics +GROUP BY 1 WITH NO DATA; +CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); +SELECT * FROM cagg_expr ORDER BY time LIMIT 5; + time | const | numeric | first | case | coalesce | avg1 | avg2 +------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ + Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 +(5 rows) + +--test materialization of invalidation before drop +DROP TABLE IF EXISTS drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:363: NOTICE: table "drop_chunks_table" does not exist, skipping +DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 
2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--dropping chunks will process the invalidations +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_10_13_chunk +(1 row) + +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 10 | 10 +(1 row) + +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; +CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); +--this will be seen after the drop its within the invalidation window and will be dropped +INSERT INTO drop_chunks_table VALUES (26, 100); +--this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh +--shows that the drop doesn't do more work than necessary +INSERT INTO drop_chunks_table VALUES (31, 200); +--move the time up to 39 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; +--the chunks and ranges we have thus far +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table'; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 +(3 rows) + +--the invalidation on 25 not yet seen +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 29 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--refresh to process the invalidations and then drop +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_10_14_chunk + _timescaledb_internal._hyper_10_15_chunk +(2 rows) + +--new values on 25 now seen in view +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--earliest datapoint now in table +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 30 | 30 +(1 row) + +--we see the chunks row with the dropped flags set; +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | 
status | osm_chunk +----+---------------+-----------------------+--------------------+---------------------+---------+--------+----------- + 13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f + 14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f + 15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f +(3 rows) + +--still see data in the view +SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(4 rows) + +--no data but covers dropped chunks +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ +(0 rows) + +--recreate the dropped chunk +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--see data from recreated region +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ + 20 | 20 + 19 | 19 + 18 | 18 + 17 | 17 + 16 | 16 + 15 | 15 + 14 | 14 + 13 | 13 + 12 | 12 + 11 | 11 + 10 | 10 + 9 | 9 + 8 | 8 + 7 | 7 + 6 | 6 + 5 | 5 + 4 | 4 + 3 | 3 + 2 | 2 + 1 | 1 + 0 | 0 +(21 rows) + +--should show chunk with old name and old ranges +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 +(4 rows) + +--We dropped everything up to the bucket starting at 30 and then +--inserted new data up to and including time 20. Therefore, the +--dropped data should stay the same as long as we only refresh +--buckets that have non-dropped data. 
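The refresh call that follows is deliberately limited to the window [30, 40), so buckets that were materialized before their raw chunks were dropped keep their old values. The same pattern works outside this test file; the sketch below is only an illustration and uses hypothetical relation names (my_metrics_by_5) that do not appear in the test.

-- Hedged sketch with hypothetical names; not part of the expected output.
-- Refresh only the buckets whose raw chunks still exist.
CALL refresh_continuous_aggregate('my_metrics_by_5', 30, 40);
-- Buckets outside [30, 40) keep whatever was materialized before the chunks were dropped.
SELECT * FROM my_metrics_by_5 ORDER BY 1;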
+CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 39 + 30 | 200 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks('drop_chunks_view', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:459: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- Test that we cannot drop chunks when specifying materialized +-- hypertable +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_11_20_chunk | 0 | 100 +(1 row) + +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); +psql:include/cagg_ddl_common.sql:471: ERROR: operation not supported on materialized hypertable +DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. +HINT: Try the operation on the continuous aggregate instead. +\set VERBOSITY terse +\set ON_ERROR_STOP 1 +----------------------------------------------------------------- +-- Test that refresh_continuous_aggregate on chunk will refresh, +-- but only in the regions covered by the show chunks. 
+----------------------------------------------------------------- +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(6 rows) + +-- Pick the second chunk as the one to drop +WITH numbered_chunks AS ( + SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer + FROM timescaledb_information.chunks + WHERE hypertable_name = 'drop_chunks_table' + ORDER BY 1 +) +SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer +FROM numbered_chunks +WHERE row_number = 2 \gset +-- There's data in the table for the chunk/range we will drop +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 +(10 rows) + +-- Make sure there is also data in the continuous aggregate +-- CARE: +-- Note that this behaviour of dropping the materialization table chunks and expecting a refresh +-- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over +-- distributed hypertables merge the invalidations the refresh region is updated in the distributed +-- case, which may be different than what happens in the normal hypertable case. The command was: +-- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 9 + 10 | 14 + 15 | 19 + 20 | 20 + 45 | 500 + 50 | 500 +(7 rows) + +-- Drop the second chunk, to leave a gap in the data +\if :IS_DISTRIBUTED +CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); +DROP FOREIGN TABLE :chunk_to_drop; +\else +DROP TABLE :chunk_to_drop; +\endif +-- Verify that the second chunk is dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(5 rows) + +-- Data is no longer in the table but still in the view +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ +(0 rows) + +SELECT * FROM drop_chunks_view +WHERE time_bucket >= :range_start_integer +AND time_bucket < :range_end_integer +ORDER BY 1; + time_bucket | max +-------------+----- + 10 | 14 + 15 | 19 +(2 rows) + +-- Insert a large value in one of the chunks that will be dropped +INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); +-- Now refresh and drop the two adjecent chunks +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); +SELECT drop_chunks('drop_chunks_table', older_than=>30); + drop_chunks 
+------------------------------------------ + _timescaledb_internal._hyper_10_13_chunk + _timescaledb_internal._hyper_10_15_chunk +(2 rows) + +-- Verify that the chunks are dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(3 rows) + +-- The continuous aggregate should be refreshed in the regions covered +-- by the dropped chunks, but not in the "gap" region, i.e., the +-- region of the chunk that was dropped via DROP TABLE. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 100 + 20 | 20 + 45 | 500 + 50 | 500 +(5 rows) + +-- Now refresh in the region of the first two dropped chunks +CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); +-- Aggregate data in the refreshed range should no longer exist since +-- the underlying data was dropped. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 20 | 20 + 45 | 500 + 50 | 500 +(3 rows) + +-------------------------------------------------------------------- +-- Check that we can create a materialized table in a tablespace. We +-- create one with tablespace and one without and compare them. +CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + pg_get_userbyid(relowner) AS user_view_owner, + relname AS mat_table, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, + direct_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, + partial_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +CREATE VIEW chunk_info AS +SELECT ht.schema_name, ht.table_name, relname AS chunk_name, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class c, + _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.chunk ch + WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; +CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS whatever_nid + FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) +\gset +\else +SELECT hypertable_id AS whatever_nid + FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) +\gset +\endif +SELECT set_integer_now_func('whatever', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW whatever_view_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever 
GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW whatever_view_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +TABLESPACE tablespace1 AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; +CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); +CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +DROP MATERIALIZED VIEW whatever_view_1; +psql:include/cagg_ddl_common.sql:649: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +DROP MATERIALIZED VIEW whatever_view_2; +psql:include/cagg_ddl_common.sql:650: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +-- test bucket width expressions on integer hypertables +CREATE TABLE metrics_int2 ( + time int2 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int4 ( + time int4 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int8 ( + time int8 NOT NULL, + device_id int, + v1 float, + v2 float +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); +\else +SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + create_hypertable +---------------------------- + (15,public,metrics_int2,t) + (16,public,metrics_int4,t) + (17,public,metrics_int8,t) +(3 rows) + +\endif +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + 
LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +$DIST$); +\endif +SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + set_integer_now_func +---------------------- + + + +(3 rows) + +-- width expression for int2 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:755: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint + 2::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:762: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int4 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:770: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:777: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int8 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:785: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:792: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +\set ON_ERROR_STOP 0 +-- non-immutable expresions should be rejected +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:811: ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +-- Test various ALTER MATERIALIZED VIEW statements. 
+SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1 +WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | default_perm_user +mat_table | _materialized_hypertable_24 +mat_table_owner | default_perm_user +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | default_perm_user +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | default_perm_user +tablespace | + +\x off +-- This should not work since the target user has the wrong role, but +-- we test that the normal checks are done when changing the owner. +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +psql:include/cagg_ddl_common.sql:831: ERROR: must be member of role "test_role_1" +\set ON_ERROR_STOP 1 +-- Superuser can always change owner +SET ROLE :ROLE_CLUSTER_SUPERUSER; +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | test_role_1 +mat_table | _materialized_hypertable_24 +mat_table_owner | test_role_1 +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | test_role_1 +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | test_role_1 +tablespace | + +\x off +-- +-- Test drop continuous aggregate cases +-- +-- Issue: #2608 +-- +CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ + CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS + $BODY$ + SELECT 50; + $BODY$; +$DIST$); +\endif +CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); + create_hypertable +---------------------------- + (25,public,conditionsnm,t) +(1 row) + +\endif +SELECT set_integer_now_func('conditionsnm', 'test_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO conditionsnm +SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; +-- Case 1: DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:879: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4; +psql:include/cagg_ddl_common.sql:881: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +-- Case 2: DROP CASCADE should have similar behaviour as DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:889: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4 CASCADE; 
+psql:include/cagg_ddl_common.sql:891: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +-- Case 3: require CASCADE in case of dependent object +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:899: NOTICE: refreshing continuous aggregate "conditionsnm_4" +CREATE VIEW see_cagg as select * from conditionsnm_4; +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW conditionsnm_4; +psql:include/cagg_ddl_common.sql:903: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +\set ON_ERROR_STOP 1 +-- Case 4: DROP CASCADE with dependency +DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +-- Test DROP SCHEMA CASCADE with continuous aggregates +-- +-- Issue: #2350 +-- +-- Case 1: DROP SCHEMA CASCADE +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); + create_hypertable +---------------------------------- + (29,test_schema,telemetry_raw,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_schema.telemetry_1s + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'telemetry_1s'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:946: NOTICE: drop cascades to 4 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +-- Case 2: DROP SCHEMA CASCADE with multiple caggs +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); + create_hypertable +---------------------------------- + (31,test_schema,telemetry_raw,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_schema.cagg1 + WITH (timescaledb.continuous, 
timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +CREATE MATERIALIZED VIEW test_schema.cagg2 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME1", + partial_view_name as "PART_VIEW_NAME1", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg1'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal +(1 row) + +\gset +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME2", + partial_view_name as "PART_VIEW_NAME2", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg2'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:1003: NOTICE: drop cascades to 7 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +-- Check that we can rename a column of a materialized view and still +-- rebuild it after (#3051, #3405) +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (34,public,conditions,t) +(1 row) + +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( 
'2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", + format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", + format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'conditions_daily' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns('conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | t + avg | double precision | f +(3 rows) + +ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns(' conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | t + avg | double precision | f +(3 rows) + +-- This will rebuild the materialized view and should succeed. +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); +-- Refresh the continuous aggregate to check that it works after the +-- rename. 
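The column checks above rely on the test suite's test.show_columns helper. In an ordinary session, the same verification — that the rename reached the user view as well as the internal direct view, partial view, and materialization hypertable — can be approximated with the standard information_schema catalog. The sketch below is illustrative and not part of the expected output; the internal relation names would be the ones captured by the \gset above.

-- Hedged sketch; not part of the expected output.
SELECT column_name, data_type
  FROM information_schema.columns
 WHERE table_name = 'conditions_daily'
 ORDER BY ordinal_position;
-- Repeat with the direct view, partial view, and materialization table names
-- captured above to confirm the renamed column is visible everywhere.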
+\set VERBOSITY verbose +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +\set VERBOSITY terse +-- +-- Indexes on continuous aggregate +-- +\set ON_ERROR_STOP 0 +-- unique indexes are not supported +CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); +psql:include/cagg_ddl_common.sql:1089: ERROR: continuous aggregates do not support UNIQUE indexes +-- concurrently index creation not supported +CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); +psql:include/cagg_ddl_common.sql:1091: ERROR: hypertables do not support concurrent index creation +\set ON_ERROR_STOP 1 +CREATE INDEX index_avg ON conditions_daily (avg); +CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); +CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); +CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); +CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; +CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; +SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); + Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace +-----------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ + _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | + _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | + _timescaledb_internal.index_avg | {avg} | | | f | f | f | + _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | + _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | + _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | +(8 rows) + +-- #3696 assertion failure when referencing columns not present in result +CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); +\else +SELECT table_name FROM create_hypertable('i3696','time'); + table_name +------------ + i3696 +(1 row) + +\endif +CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; +psql:include/cagg_ddl_common.sql:1113: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); +CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt + cnt2, bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +psql:include/cagg_ddl_common.sql:1121: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true'); +--TEST test with multiple settings on 
continuous aggregates -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +CREATE TABLE test_setting(time timestamptz not null, val numeric); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); +\else +SELECT create_hypertable('test_setting', 'time'); + create_hypertable +---------------------------- + (39,public,test_setting,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1135: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +INSERT INTO test_setting +SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1146: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1154: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + 
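Whether these counts return 20 or 21 is governed entirely by the materialized_only flag: with realtime aggregation enabled, the user view gains a UNION ALL branch that reads raw data above the cagg_watermark, so the not-yet-materialized row becomes visible. Besides the timescaledb_information.continuous_aggregates query used in this test, the effect can also be seen directly in the view definition; the sketch below is illustrative and not part of the expected output.

-- Hedged sketch; not part of the expected output.
-- A realtime continuous aggregate's definition contains a UNION ALL branch over the
-- raw hypertable guarded by _timescaledb_functions.cagg_watermark(); a
-- materialized-only one selects from the materialization hypertable alone.
SELECT pg_get_viewdef('test_setting_cagg'::regclass, true);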
+--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +DELETE FROM test_setting WHERE val = 20; +--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +DROP MATERIALIZED VIEW test_setting_cagg; +psql:include/cagg_ddl_common.sql:1179: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1182: NOTICE: refreshing continuous aggregate "test_setting_cagg" +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1190: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1198: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off 
now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +-- END TEST with multiple settings +-- Test View Target Entries that contain both aggrefs and Vars in the same expression +CREATE TABLE transactions +( + "time" timestamp with time zone NOT NULL, + dummy1 integer, + dummy2 integer, + dummy3 integer, + dummy4 integer, + dummy5 integer, + amount integer, + fiat_value integer +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('transactions', 'time'); + create_hypertable +---------------------------- + (46,public,transactions,t) +(1 row) + +\endif +INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); +CREATE materialized view cashflows( + bucket, + amount, + cashflow, + cashflow2 +) WITH ( + timescaledb.continuous, + timescaledb.materialized_only = true +) AS +SELECT time_bucket ('1 day', time) AS bucket, + amount, + CASE + WHEN amount < 0 THEN (0 - sum(fiat_value)) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) +FROM transactions +GROUP BY bucket, amount; +psql:include/cagg_ddl_common.sql:1272: NOTICE: refreshing continuous aggregate "cashflows" +SELECT h.table_name AS "MAT_TABLE_NAME", + partial_view_name AS "PART_VIEW_NAME", + direct_view_name AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cashflows' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. 
+\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" + View "_timescaledb_internal._direct_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"PART_VIEW_NAME" + View "_timescaledb_internal._partial_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" + Table "_timescaledb_internal._materialized_hypertable_47" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +-----------+--------------------------+-----------+----------+---------+---------+--------------+------------- + bucket | timestamp with time zone | | not null | | plain | | + amount | integer | | | | plain | | + cashflow | bigint | | | | plain | | + cashflow2 | bigint | | | | plain | | +Indexes: + "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) + "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) +Triggers: + ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_47_52_chunk, + _timescaledb_internal._hyper_47_53_chunk + +\d+ 'cashflows' + View "public.cashflows" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_47.bucket, + _materialized_hypertable_47.amount, + _materialized_hypertable_47.cashflow, + _materialized_hypertable_47.cashflow2 + FROM _timescaledb_internal._materialized_hypertable_47; + +SELECT * FROM cashflows; + bucket | amount | cashflow | cashflow2 +------------------------------+--------+----------+----------- + Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 + Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 + Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 + Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 + Thu Nov 01 17:00:00 
2018 PDT | -1 | -10 | 9 + Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 +(6 rows) + +-- test cagg creation with named arguments in time_bucket +-- note that positional arguments cannot follow named arguments +-- 1. test named origin +-- 2. test named timezone +-- 3. test named ts +-- 4. test named bucket width +-- named origin +CREATE MATERIALIZED VIEW cagg_named_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named timezone +CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named ts +CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named bucket width +CREATE MATERIALIZED VIEW cagg_named_all WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and +-- using an INTERVAL for the end timestamp (issue #5534) +CREATE MATERIALIZED VIEW transactions_montly +WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) + FROM transactions +GROUP BY 1 +WITH NO DATA; +-- No rows +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +--------+-----+-----+----- +(0 rows) + +-- Refresh from beginning of the CAGG for 1 month +CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +TRUNCATE transactions_montly; +-- Partial refresh the CAGG from beginning to an specific timestamp +CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 +(1 row) + +-- Full refresh the CAGG +CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +-- Check set_chunk_time_interval on continuous aggregate +CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) +FROM transactions +GROUP BY 1 +WITH NO DATA; +SELECT 
set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); + set_chunk_time_interval +------------------------- + +(1 row) + +CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); +SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month' +FROM _timescaledb_catalog.dimension d + RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' +WHERE d.hypertable_id = ca.mat_hypertable_id; + ?column? +---------- + t +(1 row) + +-- Since #6077 CAggs are materialized only by default +DROP TABLE conditions CASCADE; +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (54,public,conditions,t) +(1 row) + +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +-- Should return NO ROWS +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+--------+----- +(0 rows) + +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55 + WHERE 
_materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT conditions.location, + time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + avg(conditions.temperature) AS avg + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) + GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); + +-- Should return ROWS because now it is realtime +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- Should return ROWS because we refreshed it +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + diff --git a/tsl/test/expected/cagg_ddl-15.out b/tsl/test/expected/cagg_ddl-15.out new file mode 100644 index 00000000000..b9514c3eb29 --- /dev/null +++ b/tsl/test/expected/cagg_ddl-15.out @@ -0,0 +1,2164 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set IS_DISTRIBUTED FALSE +\ir include/cagg_ddl_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
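The conditions_daily view definitions above show the mechanics of the materialized_only toggle: with materialized_only=false the user view becomes a UNION ALL of the materialization table (for buckets below the completion watermark) and a live aggregate over the raw hypertable (at or above it). As an editorial aside, here is a minimal sketch of inspecting that watermark for a timestamptz-partitioned continuous aggregate; the view name my_cagg is illustrative, while the catalog table and functions are the same ones that appear in the view definition above.

-- Sketch (illustrative view name my_cagg): report the completion watermark of a cagg.
SELECT ca.user_view_name,
       _timescaledb_functions.to_timestamp(
           _timescaledb_functions.cagg_watermark(ca.mat_hypertable_id)) AS watermark
FROM _timescaledb_catalog.continuous_agg ca
WHERE ca.user_view_name = 'my_cagg';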
+-- Set this variable to avoid using a hard-coded path each time query +-- results are compared +\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +\else +\echo 'Running local hypertable tests' +Running local hypertable tests +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +--DDL commands on continuous aggregates +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL +); +\if :IS_DISTRIBUTED +SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); +\else +SELECT table_name FROM create_hypertable('conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +\endif +-- schema tests +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- drop if the tablespace1 and/or tablespace2 exists +SET client_min_messages TO error; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +RESET client_min_messages; +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +CREATE SCHEMA rename_schema; +GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); +\else +SELECT create_hypertable('foo', 'time'); + create_hypertable +------------------- + (2,public,foo,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW rename_test + WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1week', time), COUNT(data) + FROM foo + GROUP BY 1 WITH NO DATA; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + public | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + rename_schema | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'rename_test' +\gset +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name 
| partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + rename_schema | rename_test | public | _partial_view_3 +(1 row) + +--alter direct view schema +SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | direct_view_schema | direct_view_name +------------------+----------------+-----------------------+------------------ + rename_schema | rename_test | _timescaledb_internal | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA rename_schema RENAME TO new_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; +ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + foo_name_schema | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | foo_name_schema 
| _partial_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA foo_name_schema RENAME TO rename_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | rename_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+--------------------+---------------------+------------------- + public | rename_c_aggregate | rename_schema | _partial_view_3 +(1 row) + +SELECT * FROM rename_c_aggregate; + time_bucket | count +-------------+------- +(0 rows) + +ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 +(1 row) + +--rename direct view +ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view +(1 row) + +-- drop_chunks tests +DROP TABLE conditions CASCADE; +DROP TABLE foo CASCADE; +psql:include/cagg_ddl_common.sql:166: NOTICE: drop cascades to 2 other objects +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_id + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), COUNT(data) + FROM 
drop_chunks_table + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +-- Only refresh up to bucket 15 initially. Matches the old refresh +-- behavior that didn't materialize everything +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table without specifying +-- cont. aggregate view name explicitly +\set ON_ERROR_STOP 0 +SELECT drop_chunks(:'drop_chunks_mat_table', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:218: ERROR: operation not supported on materialized hypertable +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = 
_timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +-- Refresh up to bucket 15 to match old materializer behavior +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +-- TRUNCATE test +-- Can truncate regular hypertables that have caggs +TRUNCATE drop_chunks_table_u; +\set ON_ERROR_STOP 0 +-- Can't truncate materialized hypertables directly +TRUNCATE :drop_chunks_mat_table_u; +psql:include/cagg_ddl_common.sql:276: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +\set ON_ERROR_STOP 1 +-- Check that we don't interfere with TRUNCATE of normal table and +-- partitioned table +CREATE TABLE truncate (value int); +INSERT INTO truncate VALUES (1), (2); +TRUNCATE truncate; +SELECT * FROM truncate; + value +------- +(0 rows) + +CREATE TABLE truncate_partitioned (value int) + PARTITION BY RANGE(value); +CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned + FOR VALUES FROM (1) TO (3); +INSERT INTO truncate_partitioned VALUES (1), (2); +TRUNCATE truncate_partitioned; +SELECT * FROM truncate_partitioned; + value +------- +(0 rows) + +-- ALTER TABLE tests +\set ON_ERROR_STOP 0 +-- test a variety of ALTER TABLE statements +ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; +psql:include/cagg_ddl_common.sql:296: ERROR: renaming columns on materialization tables is not supported +ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); +psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; +psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; +psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; +psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; +psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; +psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; +psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; +psql:include/cagg_ddl_common.sql:304: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; +psql:include/cagg_ddl_common.sql:305: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; +psql:include/cagg_ddl_common.sql:306: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u NOT OF; +psql:include/cagg_ddl_common.sql:307: ERROR: operation not supported on 
materialization tables +ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; +psql:include/cagg_ddl_common.sql:308: ERROR: operation not supported on materialization tables +\set ON_ERROR_STOP 1 +ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; +ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT * FROM new_name; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +\set ON_ERROR_STOP 0 +-- no continuous aggregates on a continuous aggregate materialization table +CREATE MATERIALIZED VIEW new_name_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('6', time_bucket), COUNT("count") + FROM new_name + GROUP BY 1 WITH NO DATA; +psql:include/cagg_ddl_common.sql:331: ERROR: hypertable is a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); +\else +SELECT create_hypertable('metrics','time'); + create_hypertable +---------------------- + (8,public,metrics,t) +(1 row) + +\endif +INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; +-- check expressions in view definition +CREATE MATERIALIZED VIEW cagg_expr + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1d', time) AS time, + 'Const'::text AS Const, + 4.3::numeric AS "numeric", + first(metrics,time), + CASE WHEN true THEN 'foo' ELSE 'bar' END, + COALESCE(NULL,'coalesce'), + avg(v1) + avg(v2) AS avg1, + avg(v1+v2) AS avg2 +FROM metrics +GROUP BY 1 WITH NO DATA; +CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); +SELECT * FROM cagg_expr ORDER BY time LIMIT 5; + time | const | numeric | first | case | coalesce | avg1 | avg2 +------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ + Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 +(5 rows) + +--test materialization of invalidation before drop +DROP TABLE IF EXISTS drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:363: NOTICE: table "drop_chunks_table" does not exist, skipping +DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 
2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--dropping chunks will process the invalidations +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_10_13_chunk +(1 row) + +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 10 | 10 +(1 row) + +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; +CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); +--this will be seen after the drop its within the invalidation window and will be dropped +INSERT INTO drop_chunks_table VALUES (26, 100); +--this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh +--shows that the drop doesn't do more work than necessary +INSERT INTO drop_chunks_table VALUES (31, 200); +--move the time up to 39 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; +--the chunks and ranges we have thus far +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table'; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 +(3 rows) + +--the invalidation on 25 not yet seen +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 29 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--refresh to process the invalidations and then drop +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_10_14_chunk + _timescaledb_internal._hyper_10_15_chunk +(2 rows) + +--new values on 25 now seen in view +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--earliest datapoint now in table +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 30 | 30 +(1 row) + +--we see the chunks row with the dropped flags set; +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | 
status | osm_chunk +----+---------------+-----------------------+--------------------+---------------------+---------+--------+----------- + 13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f + 14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f + 15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f +(3 rows) + +--still see data in the view +SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(4 rows) + +--no data but covers dropped chunks +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ +(0 rows) + +--recreate the dropped chunk +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--see data from recreated region +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ + 20 | 20 + 19 | 19 + 18 | 18 + 17 | 17 + 16 | 16 + 15 | 15 + 14 | 14 + 13 | 13 + 12 | 12 + 11 | 11 + 10 | 10 + 9 | 9 + 8 | 8 + 7 | 7 + 6 | 6 + 5 | 5 + 4 | 4 + 3 | 3 + 2 | 2 + 1 | 1 + 0 | 0 +(21 rows) + +--should show chunk with old name and old ranges +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 +(4 rows) + +--We dropped everything up to the bucket starting at 30 and then +--inserted new data up to and including time 20. Therefore, the +--dropped data should stay the same as long as we only refresh +--buckets that have non-dropped data. 
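Before that refresh, a quick way to see which buckets still have raw (non-dropped) data is to aggregate the hypertable directly. This is a sketch against the objects above, reusing the same bucket width '5' as drop_chunks_view; it is not part of the test itself.

-- Sketch: list buckets that still have raw rows in drop_chunks_table.
SELECT time_bucket('5', time) AS bucket, count(*) AS raw_rows, max(data) AS raw_max
FROM drop_chunks_table
GROUP BY 1
ORDER BY 1;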
+CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 39 + 30 | 200 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks('drop_chunks_view', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:459: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- Test that we cannot drop chunks when specifying materialized +-- hypertable +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_11_20_chunk | 0 | 100 +(1 row) + +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); +psql:include/cagg_ddl_common.sql:471: ERROR: operation not supported on materialized hypertable +DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. +HINT: Try the operation on the continuous aggregate instead. +\set VERBOSITY terse +\set ON_ERROR_STOP 1 +----------------------------------------------------------------- +-- Test that refresh_continuous_aggregate on chunk will refresh, +-- but only in the regions covered by the show chunks. 
+----------------------------------------------------------------- +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(6 rows) + +-- Pick the second chunk as the one to drop +WITH numbered_chunks AS ( + SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer + FROM timescaledb_information.chunks + WHERE hypertable_name = 'drop_chunks_table' + ORDER BY 1 +) +SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer +FROM numbered_chunks +WHERE row_number = 2 \gset +-- There's data in the table for the chunk/range we will drop +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 +(10 rows) + +-- Make sure there is also data in the continuous aggregate +-- CARE: +-- Note that this behaviour of dropping the materialization table chunks and expecting a refresh +-- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over +-- distributed hypertables merge the invalidations the refresh region is updated in the distributed +-- case, which may be different than what happens in the normal hypertable case. The command was: +-- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 9 + 10 | 14 + 15 | 19 + 20 | 20 + 45 | 500 + 50 | 500 +(7 rows) + +-- Drop the second chunk, to leave a gap in the data +\if :IS_DISTRIBUTED +CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); +DROP FOREIGN TABLE :chunk_to_drop; +\else +DROP TABLE :chunk_to_drop; +\endif +-- Verify that the second chunk is dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(5 rows) + +-- Data is no longer in the table but still in the view +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ +(0 rows) + +SELECT * FROM drop_chunks_view +WHERE time_bucket >= :range_start_integer +AND time_bucket < :range_end_integer +ORDER BY 1; + time_bucket | max +-------------+----- + 10 | 14 + 15 | 19 +(2 rows) + +-- Insert a large value in one of the chunks that will be dropped +INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); +-- Now refresh and drop the two adjecent chunks +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); +SELECT drop_chunks('drop_chunks_table', older_than=>30); + drop_chunks 
+------------------------------------------ + _timescaledb_internal._hyper_10_13_chunk + _timescaledb_internal._hyper_10_15_chunk +(2 rows) + +-- Verify that the chunks are dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(3 rows) + +-- The continuous aggregate should be refreshed in the regions covered +-- by the dropped chunks, but not in the "gap" region, i.e., the +-- region of the chunk that was dropped via DROP TABLE. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 100 + 20 | 20 + 45 | 500 + 50 | 500 +(5 rows) + +-- Now refresh in the region of the first two dropped chunks +CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); +-- Aggregate data in the refreshed range should no longer exist since +-- the underlying data was dropped. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 20 | 20 + 45 | 500 + 50 | 500 +(3 rows) + +-------------------------------------------------------------------- +-- Check that we can create a materialized table in a tablespace. We +-- create one with tablespace and one without and compare them. +CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + pg_get_userbyid(relowner) AS user_view_owner, + relname AS mat_table, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, + direct_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, + partial_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +CREATE VIEW chunk_info AS +SELECT ht.schema_name, ht.table_name, relname AS chunk_name, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class c, + _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.chunk ch + WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; +CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS whatever_nid + FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) +\gset +\else +SELECT hypertable_id AS whatever_nid + FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) +\gset +\endif +SELECT set_integer_now_func('whatever', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW whatever_view_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever 
GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW whatever_view_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +TABLESPACE tablespace1 AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; +CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); +CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +DROP MATERIALIZED VIEW whatever_view_1; +psql:include/cagg_ddl_common.sql:649: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +DROP MATERIALIZED VIEW whatever_view_2; +psql:include/cagg_ddl_common.sql:650: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +-- test bucket width expressions on integer hypertables +CREATE TABLE metrics_int2 ( + time int2 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int4 ( + time int4 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int8 ( + time int8 NOT NULL, + device_id int, + v1 float, + v2 float +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); +\else +SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + create_hypertable +---------------------------- + (15,public,metrics_int2,t) + (16,public,metrics_int4,t) + (17,public,metrics_int8,t) +(3 rows) + +\endif +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + 
LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +$DIST$); +\endif +SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + set_integer_now_func +---------------------- + + + +(3 rows) + +-- width expression for int2 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:755: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint + 2::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:762: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int4 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:770: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:777: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int8 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:785: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:792: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +\set ON_ERROR_STOP 0 +-- non-immutable expresions should be rejected +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:811: ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +-- Test various ALTER MATERIALIZED VIEW statements. 
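An aside on the immutability errors just above, before the ALTER MATERIALIZED VIEW tests: if a computed bucket width is really needed, one workaround is to resolve it to a constant before creating the continuous aggregate, for example with a psql variable. This is a sketch, not part of the test; the width expression is the same one the test rejects, now folded to a literal.

-- Sketch: capture the width as a psql variable, then use the resulting literal.
SELECT extract(year FROM now())::int AS width \gset
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
SELECT time_bucket(:width, time)
FROM metrics_int4
GROUP BY 1;
DROP MATERIALIZED VIEW width_expr;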
+SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1 +WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | default_perm_user +mat_table | _materialized_hypertable_24 +mat_table_owner | default_perm_user +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | default_perm_user +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | default_perm_user +tablespace | + +\x off +-- This should not work since the target user has the wrong role, but +-- we test that the normal checks are done when changing the owner. +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +psql:include/cagg_ddl_common.sql:831: ERROR: must be member of role "test_role_1" +\set ON_ERROR_STOP 1 +-- Superuser can always change owner +SET ROLE :ROLE_CLUSTER_SUPERUSER; +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | test_role_1 +mat_table | _materialized_hypertable_24 +mat_table_owner | test_role_1 +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | test_role_1 +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | test_role_1 +tablespace | + +\x off +-- +-- Test drop continuous aggregate cases +-- +-- Issue: #2608 +-- +CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ + CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS + $BODY$ + SELECT 50; + $BODY$; +$DIST$); +\endif +CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); + create_hypertable +---------------------------- + (25,public,conditionsnm,t) +(1 row) + +\endif +SELECT set_integer_now_func('conditionsnm', 'test_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO conditionsnm +SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; +-- Case 1: DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:879: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4; +psql:include/cagg_ddl_common.sql:881: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +-- Case 2: DROP CASCADE should have similar behaviour as DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:889: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4 CASCADE; 
+psql:include/cagg_ddl_common.sql:891: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +-- Case 3: require CASCADE in case of dependent object +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:899: NOTICE: refreshing continuous aggregate "conditionsnm_4" +CREATE VIEW see_cagg as select * from conditionsnm_4; +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW conditionsnm_4; +psql:include/cagg_ddl_common.sql:903: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +\set ON_ERROR_STOP 1 +-- Case 4: DROP CASCADE with dependency +DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +-- Test DROP SCHEMA CASCADE with continuous aggregates +-- +-- Issue: #2350 +-- +-- Case 1: DROP SCHEMA CASCADE +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); + create_hypertable +---------------------------------- + (29,test_schema,telemetry_raw,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_schema.telemetry_1s + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'telemetry_1s'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:946: NOTICE: drop cascades to 4 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +-- Case 2: DROP SCHEMA CASCADE with multiple caggs +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); + create_hypertable +---------------------------------- + (31,test_schema,telemetry_raw,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_schema.cagg1 + WITH (timescaledb.continuous, 
timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +CREATE MATERIALIZED VIEW test_schema.cagg2 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME1", + partial_view_name as "PART_VIEW_NAME1", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg1'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal +(1 row) + +\gset +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME2", + partial_view_name as "PART_VIEW_NAME2", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg2'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:1003: NOTICE: drop cascades to 7 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +-- Check that we can rename a column of a materialized view and still +-- rebuild it after (#3051, #3405) +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (34,public,conditions,t) +(1 row) + +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( 
'2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", + format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", + format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'conditions_daily' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns('conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | t + avg | double precision | f +(3 rows) + +ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns(' conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | t + avg | double precision | f +(3 rows) + +-- This will rebuild the materialized view and should succeed. +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); +-- Refresh the continuous aggregate to check that it works after the +-- rename. 
+\set VERBOSITY verbose +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +\set VERBOSITY terse +-- +-- Indexes on continuous aggregate +-- +\set ON_ERROR_STOP 0 +-- unique indexes are not supported +CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); +psql:include/cagg_ddl_common.sql:1089: ERROR: continuous aggregates do not support UNIQUE indexes +-- concurrently index creation not supported +CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); +psql:include/cagg_ddl_common.sql:1091: ERROR: hypertables do not support concurrent index creation +\set ON_ERROR_STOP 1 +CREATE INDEX index_avg ON conditions_daily (avg); +CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); +CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); +CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); +CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; +CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; +SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); + Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace +-----------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ + _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | + _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | + _timescaledb_internal.index_avg | {avg} | | | f | f | f | + _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | + _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | + _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | +(8 rows) + +-- #3696 assertion failure when referencing columns not present in result +CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); +\else +SELECT table_name FROM create_hypertable('i3696','time'); + table_name +------------ + i3696 +(1 row) + +\endif +CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; +psql:include/cagg_ddl_common.sql:1113: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); +CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt + cnt2, bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +psql:include/cagg_ddl_common.sql:1121: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true'); +--TEST test with multiple settings on 
continuous aggregates -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +CREATE TABLE test_setting(time timestamptz not null, val numeric); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); +\else +SELECT create_hypertable('test_setting', 'time'); + create_hypertable +---------------------------- + (39,public,test_setting,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1135: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +INSERT INTO test_setting +SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1146: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1154: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + 
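The sequence above and below cycles test_setting_cagg through every combination of timescaledb.materialized_only and timescaledb.compress, checking timescaledb_information.continuous_aggregates and the row count after each ALTER: 21 rows while real-time aggregation is on (the unmaterialized 2020-11-01 row is included), 20 once it is off. A minimal standalone sketch of the same pattern, assuming an existing continuous aggregate named my_cagg (a placeholder, not an object from this test):

-- Disable real-time aggregation and enable compression in a single ALTER.
ALTER MATERIALIZED VIEW my_cagg
    SET (timescaledb.materialized_only = 'true', timescaledb.compress = 'true');

-- Confirm both settings took effect.
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
WHERE view_name = 'my_cagg';

-- With materialized_only = true, queries see only materialized buckets;
-- rows inserted after the last refresh appear only after the next refresh.
SELECT count(*) FROM my_cagg;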
+--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +DELETE FROM test_setting WHERE val = 20; +--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +DROP MATERIALIZED VIEW test_setting_cagg; +psql:include/cagg_ddl_common.sql:1179: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1182: NOTICE: refreshing continuous aggregate "test_setting_cagg" +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1190: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1198: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off 
now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +-- END TEST with multiple settings +-- Test View Target Entries that contain both aggrefs and Vars in the same expression +CREATE TABLE transactions +( + "time" timestamp with time zone NOT NULL, + dummy1 integer, + dummy2 integer, + dummy3 integer, + dummy4 integer, + dummy5 integer, + amount integer, + fiat_value integer +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('transactions', 'time'); + create_hypertable +---------------------------- + (46,public,transactions,t) +(1 row) + +\endif +INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); +CREATE materialized view cashflows( + bucket, + amount, + cashflow, + cashflow2 +) WITH ( + timescaledb.continuous, + timescaledb.materialized_only = true +) AS +SELECT time_bucket ('1 day', time) AS bucket, + amount, + CASE + WHEN amount < 0 THEN (0 - sum(fiat_value)) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) +FROM transactions +GROUP BY bucket, amount; +psql:include/cagg_ddl_common.sql:1272: NOTICE: refreshing continuous aggregate "cashflows" +SELECT h.table_name AS "MAT_TABLE_NAME", + partial_view_name AS "PART_VIEW_NAME", + direct_view_name AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cashflows' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. 
+\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" + View "_timescaledb_internal._direct_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"PART_VIEW_NAME" + View "_timescaledb_internal._partial_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" + Table "_timescaledb_internal._materialized_hypertable_47" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +-----------+--------------------------+-----------+----------+---------+---------+--------------+------------- + bucket | timestamp with time zone | | not null | | plain | | + amount | integer | | | | plain | | + cashflow | bigint | | | | plain | | + cashflow2 | bigint | | | | plain | | +Indexes: + "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) + "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) +Triggers: + ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_47_52_chunk, + _timescaledb_internal._hyper_47_53_chunk + +\d+ 'cashflows' + View "public.cashflows" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_47.bucket, + _materialized_hypertable_47.amount, + _materialized_hypertable_47.cashflow, + _materialized_hypertable_47.cashflow2 + FROM _timescaledb_internal._materialized_hypertable_47; + +SELECT * FROM cashflows; + bucket | amount | cashflow | cashflow2 +------------------------------+--------+----------+----------- + Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 + Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 + Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 + Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 + Thu Nov 01 17:00:00 
2018 PDT | -1 | -10 | 9 + Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 +(6 rows) + +-- test cagg creation with named arguments in time_bucket +-- note that positional arguments cannot follow named arguments +-- 1. test named origin +-- 2. test named timezone +-- 3. test named ts +-- 4. test named bucket width +-- named origin +CREATE MATERIALIZED VIEW cagg_named_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named timezone +CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named ts +CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named bucket width +CREATE MATERIALIZED VIEW cagg_named_all WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and +-- using an INTERVAL for the end timestamp (issue #5534) +CREATE MATERIALIZED VIEW transactions_montly +WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) + FROM transactions +GROUP BY 1 +WITH NO DATA; +-- No rows +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +--------+-----+-----+----- +(0 rows) + +-- Refresh from beginning of the CAGG for 1 month +CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +TRUNCATE transactions_montly; +-- Partial refresh the CAGG from beginning to an specific timestamp +CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 +(1 row) + +-- Full refresh the CAGG +CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +-- Check set_chunk_time_interval on continuous aggregate +CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) +FROM transactions +GROUP BY 1 +WITH NO DATA; +SELECT 
set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); + set_chunk_time_interval +------------------------- + +(1 row) + +CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); +SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month' +FROM _timescaledb_catalog.dimension d + RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' +WHERE d.hypertable_id = ca.mat_hypertable_id; + ?column? +---------- + t +(1 row) + +-- Since #6077 CAggs are materialized only by default +DROP TABLE conditions CASCADE; +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (54,public,conditions,t) +(1 row) + +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +-- Should return NO ROWS +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+--------+----- +(0 rows) + +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55 + WHERE 
_materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT conditions.location, + time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + avg(conditions.temperature) AS avg + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) + GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); + +-- Should return ROWS because now it is realtime +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- Should return ROWS because we refreshed it +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + diff --git a/tsl/test/expected/cagg_ddl-16.out b/tsl/test/expected/cagg_ddl-16.out new file mode 100644 index 00000000000..e8461589ca1 --- /dev/null +++ b/tsl/test/expected/cagg_ddl-16.out @@ -0,0 +1,2164 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set IS_DISTRIBUTED FALSE +\ir include/cagg_ddl_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- Set this variable to avoid using a hard-coded path each time query +-- results are compared +\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +\else +\echo 'Running local hypertable tests' +Running local hypertable tests +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +--DDL commands on continuous aggregates +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL +); +\if :IS_DISTRIBUTED +SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); +\else +SELECT table_name FROM create_hypertable('conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +\endif +-- schema tests +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- drop if the tablespace1 and/or tablespace2 exists +SET client_min_messages TO error; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +RESET client_min_messages; +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +CREATE SCHEMA rename_schema; +GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); +\else +SELECT create_hypertable('foo', 'time'); + create_hypertable +------------------- + (2,public,foo,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW rename_test + WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1week', time), COUNT(data) + FROM foo + GROUP BY 1 WITH NO DATA; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + public | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + rename_schema | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'rename_test' +\gset +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name 
| partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + rename_schema | rename_test | public | _partial_view_3 +(1 row) + +--alter direct view schema +SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | direct_view_schema | direct_view_name +------------------+----------------+-----------------------+------------------ + rename_schema | rename_test | _timescaledb_internal | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA rename_schema RENAME TO new_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; +ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + foo_name_schema | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | foo_name_schema 
| _partial_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA foo_name_schema RENAME TO rename_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | rename_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+--------------------+---------------------+------------------- + public | rename_c_aggregate | rename_schema | _partial_view_3 +(1 row) + +SELECT * FROM rename_c_aggregate; + time_bucket | count +-------------+------- +(0 rows) + +ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 +(1 row) + +--rename direct view +ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view +(1 row) + +-- drop_chunks tests +DROP TABLE conditions CASCADE; +DROP TABLE foo CASCADE; +psql:include/cagg_ddl_common.sql:166: NOTICE: drop cascades to 2 other objects +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_id + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), COUNT(data) + FROM 
drop_chunks_table + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +-- Only refresh up to bucket 15 initially. Matches the old refresh +-- behavior that didn't materialize everything +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table without specifying +-- cont. aggregate view name explicitly +\set ON_ERROR_STOP 0 +SELECT drop_chunks(:'drop_chunks_mat_table', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:218: ERROR: operation not supported on materialized hypertable +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = 
_timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +-- Refresh up to bucket 15 to match old materializer behavior +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +-- TRUNCATE test +-- Can truncate regular hypertables that have caggs +TRUNCATE drop_chunks_table_u; +\set ON_ERROR_STOP 0 +-- Can't truncate materialized hypertables directly +TRUNCATE :drop_chunks_mat_table_u; +psql:include/cagg_ddl_common.sql:276: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +\set ON_ERROR_STOP 1 +-- Check that we don't interfere with TRUNCATE of normal table and +-- partitioned table +CREATE TABLE truncate (value int); +INSERT INTO truncate VALUES (1), (2); +TRUNCATE truncate; +SELECT * FROM truncate; + value +------- +(0 rows) + +CREATE TABLE truncate_partitioned (value int) + PARTITION BY RANGE(value); +CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned + FOR VALUES FROM (1) TO (3); +INSERT INTO truncate_partitioned VALUES (1), (2); +TRUNCATE truncate_partitioned; +SELECT * FROM truncate_partitioned; + value +------- +(0 rows) + +-- ALTER TABLE tests +\set ON_ERROR_STOP 0 +-- test a variety of ALTER TABLE statements +ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; +psql:include/cagg_ddl_common.sql:296: ERROR: renaming columns on materialization tables is not supported +ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); +psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; +psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; +psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; +psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; +psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; +psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; +psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; +psql:include/cagg_ddl_common.sql:304: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; +psql:include/cagg_ddl_common.sql:305: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; +psql:include/cagg_ddl_common.sql:306: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u NOT OF; +psql:include/cagg_ddl_common.sql:307: ERROR: operation not supported on 
materialization tables +ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; +psql:include/cagg_ddl_common.sql:308: ERROR: operation not supported on materialization tables +\set ON_ERROR_STOP 1 +ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; +ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT * FROM new_name; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +\set ON_ERROR_STOP 0 +-- no continuous aggregates on a continuous aggregate materialization table +CREATE MATERIALIZED VIEW new_name_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('6', time_bucket), COUNT("count") + FROM new_name + GROUP BY 1 WITH NO DATA; +psql:include/cagg_ddl_common.sql:331: ERROR: hypertable is a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); +\else +SELECT create_hypertable('metrics','time'); + create_hypertable +---------------------- + (8,public,metrics,t) +(1 row) + +\endif +INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; +-- check expressions in view definition +CREATE MATERIALIZED VIEW cagg_expr + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1d', time) AS time, + 'Const'::text AS Const, + 4.3::numeric AS "numeric", + first(metrics,time), + CASE WHEN true THEN 'foo' ELSE 'bar' END, + COALESCE(NULL,'coalesce'), + avg(v1) + avg(v2) AS avg1, + avg(v1+v2) AS avg2 +FROM metrics +GROUP BY 1 WITH NO DATA; +CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); +SELECT * FROM cagg_expr ORDER BY time LIMIT 5; + time | const | numeric | first | case | coalesce | avg1 | avg2 +------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ + Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 +(5 rows) + +--test materialization of invalidation before drop +DROP TABLE IF EXISTS drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:363: NOTICE: table "drop_chunks_table" does not exist, skipping +DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:364: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 
2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--dropping chunks will process the invalidations +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_10_13_chunk +(1 row) + +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 10 | 10 +(1 row) + +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; +CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); +--this will be seen after the drop its within the invalidation window and will be dropped +INSERT INTO drop_chunks_table VALUES (26, 100); +--this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh +--shows that the drop doesn't do more work than necessary +INSERT INTO drop_chunks_table VALUES (31, 200); +--move the time up to 39 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; +--the chunks and ranges we have thus far +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table'; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 +(3 rows) + +--the invalidation on 25 not yet seen +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 29 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--refresh to process the invalidations and then drop +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_10_14_chunk + _timescaledb_internal._hyper_10_15_chunk +(2 rows) + +--new values on 25 now seen in view +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--earliest datapoint now in table +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 30 | 30 +(1 row) + +--we see the chunks row with the dropped flags set; +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | 
status | osm_chunk +----+---------------+-----------------------+--------------------+---------------------+---------+--------+----------- + 13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f + 14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f + 15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f +(3 rows) + +--still see data in the view +SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(4 rows) + +--no data but covers dropped chunks +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ +(0 rows) + +--recreate the dropped chunk +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--see data from recreated region +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ + 20 | 20 + 19 | 19 + 18 | 18 + 17 | 17 + 16 | 16 + 15 | 15 + 14 | 14 + 13 | 13 + 12 | 12 + 11 | 11 + 10 | 10 + 9 | 9 + 8 | 8 + 7 | 7 + 6 | 6 + 5 | 5 + 4 | 4 + 3 | 3 + 2 | 2 + 1 | 1 + 0 | 0 +(21 rows) + +--should show chunk with old name and old ranges +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 +(4 rows) + +--We dropped everything up to the bucket starting at 30 and then +--inserted new data up to and including time 20. Therefore, the +--dropped data should stay the same as long as we only refresh +--buckets that have non-dropped data. 
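The steps here and below demonstrate the interplay between refresh and retention on a continuous aggregate: refresh_continuous_aggregate materializes pending invalidations for the requested window, drop_chunks on the raw hypertable then removes the underlying rows while already-materialized buckets survive, and refreshing a window whose raw chunks are gone deletes those buckets again. A compact sketch of that pattern, reusing the drop_chunks_table/drop_chunks_view objects from this test (the literal 30 is only an example cut-off):

-- Materialize everything below the cut-off, then drop the raw chunks there.
CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30);
SELECT drop_chunks('drop_chunks_table', older_than => 30);

-- The aggregate still reports buckets for the dropped region ...
SELECT * FROM drop_chunks_view WHERE time_bucket < 30 ORDER BY 1;

-- ... until that region is refreshed again; with the raw rows gone, the
-- corresponding buckets are removed from the materialization.
CALL refresh_continuous_aggregate('drop_chunks_view', 0, 30);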
+CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 39 + 30 | 200 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks('drop_chunks_view', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:459: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- Test that we cannot drop chunks when specifying materialized +-- hypertable +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_11_20_chunk | 0 | 100 +(1 row) + +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); +psql:include/cagg_ddl_common.sql:471: ERROR: operation not supported on materialized hypertable +DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. +HINT: Try the operation on the continuous aggregate instead. +\set VERBOSITY terse +\set ON_ERROR_STOP 1 +----------------------------------------------------------------- +-- Test that refresh_continuous_aggregate on chunk will refresh, +-- but only in the regions covered by the show chunks. 
+----------------------------------------------------------------- +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_14_chunk | 10 | 20 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(6 rows) + +-- Pick the second chunk as the one to drop +WITH numbered_chunks AS ( + SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer + FROM timescaledb_information.chunks + WHERE hypertable_name = 'drop_chunks_table' + ORDER BY 1 +) +SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer +FROM numbered_chunks +WHERE row_number = 2 \gset +-- There's data in the table for the chunk/range we will drop +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 +(10 rows) + +-- Make sure there is also data in the continuous aggregate +-- CARE: +-- Note that this behaviour of dropping the materialization table chunks and expecting a refresh +-- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over +-- distributed hypertables merge the invalidations the refresh region is updated in the distributed +-- case, which may be different than what happens in the normal hypertable case. The command was: +-- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 9 + 10 | 14 + 15 | 19 + 20 | 20 + 45 | 500 + 50 | 500 +(7 rows) + +-- Drop the second chunk, to leave a gap in the data +\if :IS_DISTRIBUTED +CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); +DROP FOREIGN TABLE :chunk_to_drop; +\else +DROP TABLE :chunk_to_drop; +\endif +-- Verify that the second chunk is dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_13_chunk | 0 | 10 + _hyper_10_15_chunk | 20 | 30 + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(5 rows) + +-- Data is no longer in the table but still in the view +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ +(0 rows) + +SELECT * FROM drop_chunks_view +WHERE time_bucket >= :range_start_integer +AND time_bucket < :range_end_integer +ORDER BY 1; + time_bucket | max +-------------+----- + 10 | 14 + 15 | 19 +(2 rows) + +-- Insert a large value in one of the chunks that will be dropped +INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); +-- Now refresh and drop the two adjecent chunks +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); +SELECT drop_chunks('drop_chunks_table', older_than=>30); + drop_chunks 
+------------------------------------------ + _timescaledb_internal._hyper_10_13_chunk + _timescaledb_internal._hyper_10_15_chunk +(2 rows) + +-- Verify that the chunks are dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_10_16_chunk | 30 | 40 + _hyper_10_18_chunk | 40 | 50 + _hyper_10_19_chunk | 50 | 60 +(3 rows) + +-- The continuous aggregate should be refreshed in the regions covered +-- by the dropped chunks, but not in the "gap" region, i.e., the +-- region of the chunk that was dropped via DROP TABLE. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 100 + 20 | 20 + 45 | 500 + 50 | 500 +(5 rows) + +-- Now refresh in the region of the first two dropped chunks +CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); +-- Aggregate data in the refreshed range should no longer exist since +-- the underlying data was dropped. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 20 | 20 + 45 | 500 + 50 | 500 +(3 rows) + +-------------------------------------------------------------------- +-- Check that we can create a materialized table in a tablespace. We +-- create one with tablespace and one without and compare them. +CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + pg_get_userbyid(relowner) AS user_view_owner, + relname AS mat_table, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, + direct_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, + partial_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +CREATE VIEW chunk_info AS +SELECT ht.schema_name, ht.table_name, relname AS chunk_name, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class c, + _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.chunk ch + WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; +CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS whatever_nid + FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) +\gset +\else +SELECT hypertable_id AS whatever_nid + FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) +\gset +\endif +SELECT set_integer_now_func('whatever', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW whatever_view_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever 
GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW whatever_view_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +TABLESPACE tablespace1 AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; +CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); +CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +DROP MATERIALIZED VIEW whatever_view_1; +psql:include/cagg_ddl_common.sql:649: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +DROP MATERIALIZED VIEW whatever_view_2; +psql:include/cagg_ddl_common.sql:650: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +-- test bucket width expressions on integer hypertables +CREATE TABLE metrics_int2 ( + time int2 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int4 ( + time int4 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int8 ( + time int8 NOT NULL, + device_id int, + v1 float, + v2 float +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); +\else +SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + create_hypertable +---------------------------- + (15,public,metrics_int2,t) + (16,public,metrics_int4,t) + (17,public,metrics_int8,t) +(3 rows) + +\endif +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + 
LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +$DIST$); +\endif +SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + set_integer_now_func +---------------------- + + + +(3 rows) + +-- width expression for int2 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:755: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint + 2::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:762: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int4 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:770: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:777: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int8 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:785: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:792: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +\set ON_ERROR_STOP 0 +-- non-immutable expresions should be rejected +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:811: ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +-- Test various ALTER MATERIALIZED VIEW statements. 
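+-- (Illustrative sketch of the form exercised below, using the owner_check
+-- aggregate defined next:
+--   ALTER MATERIALIZED VIEW owner_check OWNER TO <role>;
+-- the usual SET ROLE permission checks apply unless run as superuser.)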
+SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1 +WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | default_perm_user +mat_table | _materialized_hypertable_24 +mat_table_owner | default_perm_user +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | default_perm_user +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | default_perm_user +tablespace | + +\x off +-- This should not work since the target user has the wrong role, but +-- we test that the normal checks are done when changing the owner. +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +psql:include/cagg_ddl_common.sql:831: ERROR: must be able to SET ROLE "test_role_1" +\set ON_ERROR_STOP 1 +-- Superuser can always change owner +SET ROLE :ROLE_CLUSTER_SUPERUSER; +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | test_role_1 +mat_table | _materialized_hypertable_24 +mat_table_owner | test_role_1 +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | test_role_1 +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | test_role_1 +tablespace | + +\x off +-- +-- Test drop continuous aggregate cases +-- +-- Issue: #2608 +-- +CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ + CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS + $BODY$ + SELECT 50; + $BODY$; +$DIST$); +\endif +CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); +\else +SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); + create_hypertable +---------------------------- + (25,public,conditionsnm,t) +(1 row) + +\endif +SELECT set_integer_now_func('conditionsnm', 'test_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO conditionsnm +SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; +-- Case 1: DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:879: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4; +psql:include/cagg_ddl_common.sql:881: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +-- Case 2: DROP CASCADE should have similar behaviour as DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:889: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4 CASCADE; 
+psql:include/cagg_ddl_common.sql:891: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +-- Case 3: require CASCADE in case of dependent object +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:899: NOTICE: refreshing continuous aggregate "conditionsnm_4" +CREATE VIEW see_cagg as select * from conditionsnm_4; +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW conditionsnm_4; +psql:include/cagg_ddl_common.sql:903: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +\set ON_ERROR_STOP 1 +-- Case 4: DROP CASCADE with dependency +DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:907: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +-- Test DROP SCHEMA CASCADE with continuous aggregates +-- +-- Issue: #2350 +-- +-- Case 1: DROP SCHEMA CASCADE +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); + create_hypertable +---------------------------------- + (29,test_schema,telemetry_raw,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_schema.telemetry_1s + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'telemetry_1s'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:946: NOTICE: drop cascades to 4 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +-- Case 2: DROP SCHEMA CASCADE with multiple caggs +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); + create_hypertable +---------------------------------- + (31,test_schema,telemetry_raw,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_schema.cagg1 + WITH (timescaledb.continuous, 
timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +CREATE MATERIALIZED VIEW test_schema.cagg2 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME1", + partial_view_name as "PART_VIEW_NAME1", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg1'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal +(1 row) + +\gset +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME2", + partial_view_name as "PART_VIEW_NAME2", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg2'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:1003: NOTICE: drop cascades to 7 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +-- Check that we can rename a column of a materialized view and still +-- rebuild it after (#3051, #3405) +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (34,public,conditions,t) +(1 row) + +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( 
'2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", + format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", + format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'conditions_daily' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns('conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | t + avg | double precision | f +(3 rows) + +ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns(' conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | t + avg | double precision | f +(3 rows) + +-- This will rebuild the materialized view and should succeed. +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); +-- Refresh the continuous aggregate to check that it works after the +-- rename. 
+\set VERBOSITY verbose +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +\set VERBOSITY terse +-- +-- Indexes on continuous aggregate +-- +\set ON_ERROR_STOP 0 +-- unique indexes are not supported +CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); +psql:include/cagg_ddl_common.sql:1089: ERROR: continuous aggregates do not support UNIQUE indexes +-- concurrently index creation not supported +CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); +psql:include/cagg_ddl_common.sql:1091: ERROR: hypertables do not support concurrent index creation +\set ON_ERROR_STOP 1 +CREATE INDEX index_avg ON conditions_daily (avg); +CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); +CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); +CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); +CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; +CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; +SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); + Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace +-----------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ + _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | + _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | + _timescaledb_internal.index_avg | {avg} | | | f | f | f | + _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | + _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | + _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | +(8 rows) + +-- #3696 assertion failure when referencing columns not present in result +CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); +\else +SELECT table_name FROM create_hypertable('i3696','time'); + table_name +------------ + i3696 +(1 row) + +\endif +CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; +psql:include/cagg_ddl_common.sql:1113: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); +CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt + cnt2, bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +psql:include/cagg_ddl_common.sql:1121: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true'); +--TEST test with multiple settings on 
continuous aggregates -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +CREATE TABLE test_setting(time timestamptz not null, val numeric); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); +\else +SELECT create_hypertable('test_setting', 'time'); + create_hypertable +---------------------------- + (39,public,test_setting,t) +(1 row) + +\endif +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1135: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +INSERT INTO test_setting +SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1146: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1154: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + 
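+-- (Illustrative summary, not generated output: with materialized_only='false'
+-- the view also queries the raw hypertable in real time, so the unrefreshed
+-- 2020-11-01 row is visible and count(*) returns 21; with
+-- materialized_only='true' only the 20 refreshed buckets are returned.)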
+--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +DELETE FROM test_setting WHERE val = 20; +--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +DROP MATERIALIZED VIEW test_setting_cagg; +psql:include/cagg_ddl_common.sql:1179: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1182: NOTICE: refreshing continuous aggregate "test_setting_cagg" +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1190: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1198: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off 
now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +-- END TEST with multiple settings +-- Test View Target Entries that contain both aggrefs and Vars in the same expression +CREATE TABLE transactions +( + "time" timestamp with time zone NOT NULL, + dummy1 integer, + dummy2 integer, + dummy3 integer, + dummy4 integer, + dummy5 integer, + amount integer, + fiat_value integer +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('transactions', 'time'); + create_hypertable +---------------------------- + (46,public,transactions,t) +(1 row) + +\endif +INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); +CREATE materialized view cashflows( + bucket, + amount, + cashflow, + cashflow2 +) WITH ( + timescaledb.continuous, + timescaledb.materialized_only = true +) AS +SELECT time_bucket ('1 day', time) AS bucket, + amount, + CASE + WHEN amount < 0 THEN (0 - sum(fiat_value)) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) +FROM transactions +GROUP BY bucket, amount; +psql:include/cagg_ddl_common.sql:1272: NOTICE: refreshing continuous aggregate "cashflows" +SELECT h.table_name AS "MAT_TABLE_NAME", + partial_view_name AS "PART_VIEW_NAME", + direct_view_name AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cashflows' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. 
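+-- (Illustrative note: the "aggref and Var in the same expression" case being
+-- verified is the target entry
+--   amount + sum(fiat_value) AS cashflow2
+-- which must keep referencing the grouped column "amount" in the direct,
+-- partial and user views shown below.)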
+\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" + View "_timescaledb_internal._direct_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, + amount, + CASE + WHEN amount < 0 THEN 0 - sum(fiat_value) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount; + +\d+ "_timescaledb_internal".:"PART_VIEW_NAME" + View "_timescaledb_internal._partial_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, + amount, + CASE + WHEN amount < 0 THEN 0 - sum(fiat_value) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount; + +\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" + Table "_timescaledb_internal._materialized_hypertable_47" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +-----------+--------------------------+-----------+----------+---------+---------+--------------+------------- + bucket | timestamp with time zone | | not null | | plain | | + amount | integer | | | | plain | | + cashflow | bigint | | | | plain | | + cashflow2 | bigint | | | | plain | | +Indexes: + "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) + "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) +Triggers: + ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_47_52_chunk, + _timescaledb_internal._hyper_47_53_chunk + +\d+ 'cashflows' + View "public.cashflows" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT bucket, + amount, + cashflow, + cashflow2 + FROM _timescaledb_internal._materialized_hypertable_47; + +SELECT * FROM cashflows; + bucket | amount | cashflow | cashflow2 +------------------------------+--------+----------+----------- + Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 + Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 + Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 + Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 + Thu Nov 01 17:00:00 2018 PDT | -1 | -10 | 9 + Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 +(6 rows) + +-- test cagg creation with named arguments in time_bucket +-- note that positional arguments cannot follow named arguments +-- 1. test named origin +-- 2. test named timezone +-- 3. test named ts +-- 4. 
test named bucket width +-- named origin +CREATE MATERIALIZED VIEW cagg_named_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named timezone +CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named ts +CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named bucket width +CREATE MATERIALIZED VIEW cagg_named_all WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and +-- using an INTERVAL for the end timestamp (issue #5534) +CREATE MATERIALIZED VIEW transactions_montly +WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) + FROM transactions +GROUP BY 1 +WITH NO DATA; +-- No rows +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +--------+-----+-----+----- +(0 rows) + +-- Refresh from beginning of the CAGG for 1 month +CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +TRUNCATE transactions_montly; +-- Partial refresh the CAGG from beginning to an specific timestamp +CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 +(1 row) + +-- Full refresh the CAGG +CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +-- Check set_chunk_time_interval on continuous aggregate +CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) +FROM transactions +GROUP BY 1 +WITH NO DATA; +SELECT set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); + set_chunk_time_interval +------------------------- + +(1 row) + +CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); +SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 
month' +FROM _timescaledb_catalog.dimension d + RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' +WHERE d.hypertable_id = ca.mat_hypertable_id; + ?column? +---------- + t +(1 row) + +-- Since #6077 CAggs are materialized only by default +DROP TABLE conditions CASCADE; +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +\else +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (54,public,conditions,t) +(1 row) + +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT location, + bucket, + avg + FROM _timescaledb_internal._materialized_hypertable_55; + +-- Should return NO ROWS +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+--------+----- +(0 rows) + +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55 + WHERE _materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT conditions.location, + time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + avg(conditions.temperature) AS avg + FROM conditions + WHERE conditions."time" >= 
COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) + GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); + +-- Should return ROWS because now it is realtime +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- Should return ROWS because we refreshed it +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT location, + bucket, + avg + FROM _timescaledb_internal._materialized_hypertable_55; + +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + diff --git a/tsl/test/expected/cagg_ddl_dist_ht.out b/tsl/test/expected/cagg_ddl_dist_ht-13.out similarity index 100% rename from tsl/test/expected/cagg_ddl_dist_ht.out rename to tsl/test/expected/cagg_ddl_dist_ht-13.out diff --git a/tsl/test/expected/cagg_ddl_dist_ht-14.out b/tsl/test/expected/cagg_ddl_dist_ht-14.out new file mode 100644 index 00000000000..8c4e1394a03 --- /dev/null +++ b/tsl/test/expected/cagg_ddl_dist_ht-14.out @@ -0,0 +1,2207 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------------+-----------------------+--------------+------------------+------------------- + db_cagg_ddl_dist_ht_1 | db_cagg_ddl_dist_ht_1 | t | t | t + db_cagg_ddl_dist_ht_2 | db_cagg_ddl_dist_ht_2 | t | t | t + db_cagg_ddl_dist_ht_3 | db_cagg_ddl_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\set IS_DISTRIBUTED TRUE +\ir include/cagg_ddl_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Set this variable to avoid using a hard-coded path each time query +-- results are compared +\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +Running distributed hypertable tests +\else +\echo 'Running local hypertable tests' +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +--DDL commands on continuous aggregates +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL +); +\if :IS_DISTRIBUTED +SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); + table_name +------------ + conditions +(1 row) + +\else +SELECT table_name FROM create_hypertable('conditions', 'timec'); +\endif +-- schema tests +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +CREATE SCHEMA rename_schema; +GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (2,public,foo,t) +(1 row) + +\else +SELECT create_hypertable('foo', 'time'); +\endif +CREATE MATERIALIZED VIEW rename_test + WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1week', time), COUNT(data) + FROM foo + GROUP BY 1 WITH NO DATA; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | 
partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + public | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + rename_schema | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'rename_test' +\gset +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + rename_schema | rename_test | public | _partial_view_3 +(1 row) + +--alter direct view schema +SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | direct_view_schema | direct_view_name +------------------+----------------+-----------------------+------------------ + rename_schema | rename_test | _timescaledb_internal | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA rename_schema RENAME TO new_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; +ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; +SELECT 
user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + foo_name_schema | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA foo_name_schema RENAME TO rename_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | rename_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+--------------------+---------------------+------------------- + public | rename_c_aggregate | rename_schema | _partial_view_3 +(1 row) + +SELECT * FROM rename_c_aggregate; + time_bucket | count +-------------+------- +(0 rows) + +ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 +(1 row) + +--rename direct view +ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name 
+------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view +(1 row) + +-- drop_chunks tests +DROP TABLE conditions CASCADE; +DROP TABLE foo CASCADE; +psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_id + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), COUNT(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +-- Only refresh up to bucket 15 initially. Matches the old refresh +-- behavior that didn't materialize everything +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table without specifying +-- cont. 
aggregate view name explicitly +\set ON_ERROR_STOP 0 +SELECT drop_chunks(:'drop_chunks_mat_table', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +-- Refresh up to bucket 15 to match old materializer behavior +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +-- TRUNCATE test +-- Can truncate regular hypertables that have caggs +TRUNCATE drop_chunks_table_u; +\set ON_ERROR_STOP 0 +-- Can't truncate materialized hypertables directly +TRUNCATE :drop_chunks_mat_table_u; +psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +\set ON_ERROR_STOP 1 +-- Check that we don't interfere with TRUNCATE of normal table and +-- partitioned table +CREATE TABLE truncate (value int); +INSERT INTO truncate VALUES (1), (2); +TRUNCATE truncate; 
+SELECT * FROM truncate; + value +------- +(0 rows) + +CREATE TABLE truncate_partitioned (value int) + PARTITION BY RANGE(value); +CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned + FOR VALUES FROM (1) TO (3); +INSERT INTO truncate_partitioned VALUES (1), (2); +TRUNCATE truncate_partitioned; +SELECT * FROM truncate_partitioned; + value +------- +(0 rows) + +-- ALTER TABLE tests +\set ON_ERROR_STOP 0 +-- test a variety of ALTER TABLE statements +ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; +psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported +ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); +psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; +psql:include/cagg_ddl_common.sql:293: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; +psql:include/cagg_ddl_common.sql:294: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; +psql:include/cagg_ddl_common.sql:295: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; +psql:include/cagg_ddl_common.sql:296: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; +psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; +psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; +psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; +psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; +psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u NOT OF; +psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; +psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables +\set ON_ERROR_STOP 1 +ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; +ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT * FROM new_name; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +\set ON_ERROR_STOP 0 +-- no continuous aggregates on a continuous aggregate materialization table +CREATE MATERIALIZED VIEW new_name_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('6', time_bucket), COUNT("count") + FROM new_name + GROUP BY 1 WITH NO DATA; +psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, 
v2 float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (8,public,metrics,t) +(1 row) + +\else +SELECT create_hypertable('metrics','time'); +\endif +INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; +-- check expressions in view definition +CREATE MATERIALIZED VIEW cagg_expr + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1d', time) AS time, + 'Const'::text AS Const, + 4.3::numeric AS "numeric", + first(metrics,time), + CASE WHEN true THEN 'foo' ELSE 'bar' END, + COALESCE(NULL,'coalesce'), + avg(v1) + avg(v2) AS avg1, + avg(v1+v2) AS avg2 +FROM metrics +GROUP BY 1 WITH NO DATA; +CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); +SELECT * FROM cagg_expr ORDER BY time LIMIT 5; + time | const | numeric | first | case | coalesce | avg1 | avg2 +------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ + Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 +(5 rows) + +--test materialization of invalidation before drop +DROP TABLE IF EXISTS drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping +DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--dropping chunks will process the invalidations +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks 
+----------------------------------------------- + _timescaledb_internal._dist_hyper_10_13_chunk +(1 row) + +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 10 | 10 +(1 row) + +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; +CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); +--this will be seen after the drop its within the invalidation window and will be dropped +INSERT INTO drop_chunks_table VALUES (26, 100); +--this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh +--shows that the drop doesn't do more work than necessary +INSERT INTO drop_chunks_table VALUES (31, 200); +--move the time up to 39 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; +--the chunks and ranges we have thus far +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table'; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 +(3 rows) + +--the invalidation on 25 not yet seen +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 29 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--refresh to process the invalidations and then drop +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_14_chunk + _timescaledb_internal._dist_hyper_10_15_chunk +(2 rows) + +--new values on 25 now seen in view +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--earliest datapoint now in table +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 30 | 30 +(1 row) + +--we see the chunks row with the dropped flags set; +SELECT * FROM _timescaledb_catalog.chunk where dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+-------------------------+---------------------+---------+--------+----------- + 13 | 10 | _timescaledb_internal | _dist_hyper_10_13_chunk | | t | 0 | f + 14 | 10 | _timescaledb_internal | _dist_hyper_10_14_chunk | | t | 0 | f + 15 | 10 | _timescaledb_internal | _dist_hyper_10_15_chunk | | t | 0 | f +(3 rows) + +--still see data in the view +SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(4 rows) + +--no data but covers dropped chunks +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ +(0 rows) + +--recreate the dropped chunk +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--see data from recreated region +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ + 20 | 20 + 19 | 19 + 18 | 18 + 17 | 17 + 16 | 16 + 15 | 15 + 14 | 14 + 13 | 13 + 12 | 12 + 11 | 11 + 10 | 10 + 9 | 9 + 8 | 
8 + 7 | 7 + 6 | 6 + 5 | 5 + 4 | 4 + 3 | 3 + 2 | 2 + 1 | 1 + 0 | 0 +(21 rows) + +--should show chunk with old name and old ranges +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 +(4 rows) + +--We dropped everything up to the bucket starting at 30 and then +--inserted new data up to and including time 20. Therefore, the +--dropped data should stay the same as long as we only refresh +--buckets that have non-dropped data. +CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 39 + 30 | 200 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks('drop_chunks_view', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- Test that we cannot drop chunks when specifying materialized +-- hypertable +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_11_20_chunk | 0 | 100 +(1 row) + +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); +psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable +DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. +HINT: Try the operation on the continuous aggregate instead. +\set VERBOSITY terse +\set ON_ERROR_STOP 1 +----------------------------------------------------------------- +-- Test that refresh_continuous_aggregate on chunk will refresh, +-- but only in the regions covered by the show chunks. 
+----------------------------------------------------------------- +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(6 rows) + +-- Pick the second chunk as the one to drop +WITH numbered_chunks AS ( + SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer + FROM timescaledb_information.chunks + WHERE hypertable_name = 'drop_chunks_table' + ORDER BY 1 +) +SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer +FROM numbered_chunks +WHERE row_number = 2 \gset +-- There's data in the table for the chunk/range we will drop +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 +(10 rows) + +-- Make sure there is also data in the continuous aggregate +-- CARE: +-- Note that this behaviour of dropping the materialization table chunks and expecting a refresh +-- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over +-- distributed hypertables merge the invalidations the refresh region is updated in the distributed +-- case, which may be different than what happens in the normal hypertable case. 
The command was: +-- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 9 + 10 | 14 + 15 | 19 + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(9 rows) + +-- Drop the second chunk, to leave a gap in the data +\if :IS_DISTRIBUTED +CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); +DROP FOREIGN TABLE :chunk_to_drop; +\else +DROP TABLE :chunk_to_drop; +\endif +-- Verify that the second chunk is dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(5 rows) + +-- Data is no longer in the table but still in the view +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ +(0 rows) + +SELECT * FROM drop_chunks_view +WHERE time_bucket >= :range_start_integer +AND time_bucket < :range_end_integer +ORDER BY 1; + time_bucket | max +-------------+----- + 10 | 14 + 15 | 19 +(2 rows) + +-- Insert a large value in one of the chunks that will be dropped +INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); +-- Now refresh and drop the two adjacent chunks +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); +SELECT drop_chunks('drop_chunks_table', older_than=>30); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_13_chunk + _timescaledb_internal._dist_hyper_10_15_chunk +(2 rows) + +-- Verify that the chunks are dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(3 rows) + +-- The continuous aggregate should be refreshed in the regions covered +-- by the dropped chunks, but not in the "gap" region, i.e., the +-- region of the chunk that was dropped via DROP TABLE. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 100 + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(7 rows) + +-- Now refresh in the region of the first two dropped chunks +CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); +-- Aggregate data in the refreshed range should no longer exist since +-- the underlying data was dropped. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(5 rows) + +-------------------------------------------------------------------- +-- Check that we can create a materialized table in a tablespace. We +-- create one with tablespace and one without and compare them. 
+CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + pg_get_userbyid(relowner) AS user_view_owner, + relname AS mat_table, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, + direct_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, + partial_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +CREATE VIEW chunk_info AS +SELECT ht.schema_name, ht.table_name, relname AS chunk_name, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class c, + _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.chunk ch + WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; +CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS whatever_nid + FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) +\gset +\else +SELECT hypertable_id AS whatever_nid + FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) +\gset +\endif +SELECT set_integer_now_func('whatever', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW whatever_view_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW whatever_view_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +TABLESPACE tablespace1 AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; +CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); +CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace 
+-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +DROP MATERIALIZED VIEW whatever_view_1; +psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +DROP MATERIALIZED VIEW whatever_view_2; +psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +-- test bucket width expressions on integer hypertables +CREATE TABLE metrics_int2 ( + time int2 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int4 ( + time int4 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int8 ( + time int8 NOT NULL, + device_id int, + v1 float, + v2 float +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + create_distributed_hypertable +------------------------------- + (15,public,metrics_int2,t) + (16,public,metrics_int4,t) + (17,public,metrics_int8,t) +(3 rows) + +\else +SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); +\endif +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +$DIST$); +\endif +SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + set_integer_now_func +---------------------- + + + +(3 rows) + +-- width expression for int2 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint + 2::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int4 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" 
is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int8 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +\set ON_ERROR_STOP 0 +-- non-immutable expressions should be rejected +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +-- Test various ALTER MATERIALIZED VIEW statements. +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1 +WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | default_perm_user +mat_table | _materialized_hypertable_24 +mat_table_owner | default_perm_user +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | default_perm_user +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | default_perm_user +tablespace | + +\x off +-- This should not work since the target user has the wrong role, but +-- we test that the normal checks are done when changing the owner. 
+\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" +\set ON_ERROR_STOP 1 +-- Superuser can always change owner +SET ROLE :ROLE_CLUSTER_SUPERUSER; +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | test_role_1 +mat_table | _materialized_hypertable_24 +mat_table_owner | test_role_1 +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | test_role_1 +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | test_role_1 +tablespace | + +\x off +-- +-- Test drop continuous aggregate cases +-- +-- Issue: #2608 +-- +CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ + CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS + $BODY$ + SELECT 50; + $BODY$; +$DIST$); +\endif +CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); + create_distributed_hypertable +------------------------------- + (25,public,conditionsnm,t) +(1 row) + +\else +SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); +\endif +SELECT set_integer_now_func('conditionsnm', 'test_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO conditionsnm +SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; +-- Case 1: DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4; +psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +-- Case 2: DROP CASCADE should have similar behaviour as DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4 CASCADE; +psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +-- Case 3: require CASCADE in case of dependent object +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" +CREATE VIEW see_cagg as select * from conditionsnm_4; +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW conditionsnm_4; +psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +\set ON_ERROR_STOP 1 +-- Case 4: DROP CASCADE with dependency +DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; 
+psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +-- Test DROP SCHEMA CASCADE with continuous aggregates +-- +-- Issue: #2350 +-- +-- Case 1: DROP SCHEMA CASCADE +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); + create_distributed_hypertable +---------------------------------- + (29,test_schema,telemetry_raw,t) +(1 row) + +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); +\endif +CREATE MATERIALIZED VIEW test_schema.telemetry_1s + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'telemetry_1s'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +-- Case 2: DROP SCHEMA CASCADE with multiple caggs +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); + create_distributed_hypertable +---------------------------------- + (31,test_schema,telemetry_raw,t) +(1 row) + +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); +\endif +CREATE MATERIALIZED VIEW test_schema.cagg1 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +CREATE MATERIALIZED VIEW test_schema.cagg2 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME1", + partial_view_name as "PART_VIEW_NAME1", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg1'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema 
+-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal +(1 row) + +\gset +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME2", + partial_view_name as "PART_VIEW_NAME2", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg2'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +-- Check that we can rename a column of a materialized view and still +-- rebuild it after (#3051, #3405) +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (34,public,conditions,t) +(1 row) + +\else +SELECT create_hypertable('conditions', 'time'); +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", + format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", + format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" +FROM 
_timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'conditions_daily' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns('conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | t + avg | double precision | f +(3 rows) + +ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns(' conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | t + avg | double precision | f +(3 rows) + +-- This will rebuild the materialized view and should succeed. +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); +-- Refresh the continuous aggregate to check that it works after the +-- rename. 
+\set VERBOSITY verbose +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +\set VERBOSITY terse +-- +-- Indexes on continuous aggregate +-- +\set ON_ERROR_STOP 0 +-- unique indexes are not supported +CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); +psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes +-- concurrently index creation not supported +CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); +psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation +\set ON_ERROR_STOP 1 +CREATE INDEX index_avg ON conditions_daily (avg); +CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); +CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); +CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); +CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; +CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; +SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); + Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace +-----------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ + _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | + _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | + _timescaledb_internal.index_avg | {avg} | | | f | f | f | + _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | + _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | + _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | +(8 rows) + +-- #3696 assertion failure when referencing columns not present in result +CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (36,public,i3696,t) +(1 row) + +\else +SELECT table_name FROM create_hypertable('i3696','time'); +\endif +CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; +psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); +CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt + cnt2, bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 
'true'); +--TEST test with multiple settings on continuous aggregates -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +CREATE TABLE test_setting(time timestamptz not null, val numeric); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (39,public,test_setting,t) +(1 row) + +\else +SELECT create_hypertable('test_setting', 'time'); +\endif +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +INSERT INTO test_setting +SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only 
+-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +DELETE FROM test_setting WHERE val = 20; +--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +DROP MATERIALIZED VIEW test_setting_cagg; +psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only 
+-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +-- END TEST with multiple settings +-- Test View Target Entries that contain both aggrefs and Vars in the same expression +CREATE TABLE transactions +( + "time" timestamp with time zone NOT NULL, + dummy1 integer, + dummy2 integer, + dummy3 integer, + dummy4 integer, + dummy5 integer, + amount integer, + fiat_value integer +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (46,public,transactions,t) +(1 row) + +\else +SELECT create_hypertable('transactions', 'time'); +\endif +INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); +CREATE materialized view cashflows( + bucket, + amount, + cashflow, + cashflow2 +) WITH ( + timescaledb.continuous, + timescaledb.materialized_only = true +) AS +SELECT time_bucket ('1 day', time) AS bucket, + amount, + CASE + WHEN amount < 0 THEN (0 - sum(fiat_value)) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) +FROM transactions +GROUP BY bucket, amount; +psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" +SELECT h.table_name AS "MAT_TABLE_NAME", + partial_view_name AS "PART_VIEW_NAME", + direct_view_name AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cashflows' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. 
+\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" + View "_timescaledb_internal._direct_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"PART_VIEW_NAME" + View "_timescaledb_internal._partial_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" + Table "_timescaledb_internal._materialized_hypertable_47" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +-----------+--------------------------+-----------+----------+---------+---------+--------------+------------- + bucket | timestamp with time zone | | not null | | plain | | + amount | integer | | | | plain | | + cashflow | bigint | | | | plain | | + cashflow2 | bigint | | | | plain | | +Indexes: + "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) + "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) +Triggers: + ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_47_52_chunk, + _timescaledb_internal._hyper_47_53_chunk + +\d+ 'cashflows' + View "public.cashflows" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_47.bucket, + _materialized_hypertable_47.amount, + _materialized_hypertable_47.cashflow, + _materialized_hypertable_47.cashflow2 + FROM _timescaledb_internal._materialized_hypertable_47; + +SELECT * FROM cashflows; + bucket | amount | cashflow | cashflow2 +------------------------------+--------+----------+----------- + Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 + Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 + Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 + Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 + Thu Nov 01 17:00:00 
2018 PDT | -1 | -10 | 9 + Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 +(6 rows) + +-- test cagg creation with named arguments in time_bucket +-- note that positional arguments cannot follow named arguments +-- 1. test named origin +-- 2. test named timezone +-- 3. test named ts +-- 4. test named bucket width +-- named origin +CREATE MATERIALIZED VIEW cagg_named_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named timezone +CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named ts +CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named bucket width +CREATE MATERIALIZED VIEW cagg_named_all WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and +-- using an INTERVAL for the end timestamp (issue #5534) +CREATE MATERIALIZED VIEW transactions_montly +WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) + FROM transactions +GROUP BY 1 +WITH NO DATA; +-- No rows +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +--------+-----+-----+----- +(0 rows) + +-- Refresh from beginning of the CAGG for 1 month +CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +TRUNCATE transactions_montly; +-- Partial refresh the CAGG from beginning to an specific timestamp +CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 +(1 row) + +-- Full refresh the CAGG +CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +-- Check set_chunk_time_interval on continuous aggregate +CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) +FROM transactions +GROUP BY 1 +WITH NO DATA; +SELECT 
set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); + set_chunk_time_interval +------------------------- + +(1 row) + +CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); +SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month' +FROM _timescaledb_catalog.dimension d + RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' +WHERE d.hypertable_id = ca.mat_hypertable_id; + ?column? +---------- + t +(1 row) + +-- Since #6077 CAggs are materialized only by default +DROP TABLE conditions CASCADE; +psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (54,public,conditions,t) +(1 row) + +\else +SELECT create_hypertable('conditions', 'time'); +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +-- Should return NO ROWS +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+--------+----- +(0 rows) + +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55 + WHERE 
_materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT conditions.location, + time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + avg(conditions.temperature) AS avg + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) + GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); + +-- Should return ROWS because now it is realtime +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- Should return ROWS because we refreshed it +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_ddl_dist_ht-15.out b/tsl/test/expected/cagg_ddl_dist_ht-15.out new file mode 100644 index 00000000000..8c4e1394a03 --- /dev/null +++ b/tsl/test/expected/cagg_ddl_dist_ht-15.out @@ -0,0 +1,2207 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------------+-----------------------+--------------+------------------+------------------- + db_cagg_ddl_dist_ht_1 | db_cagg_ddl_dist_ht_1 | t | t | t + db_cagg_ddl_dist_ht_2 | db_cagg_ddl_dist_ht_2 | t | t | t + db_cagg_ddl_dist_ht_3 | db_cagg_ddl_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\set IS_DISTRIBUTED TRUE +\ir include/cagg_ddl_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Set this variable to avoid using a hard-coded path each time query +-- results are compared +\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +Running distributed hypertable tests +\else +\echo 'Running local hypertable tests' +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +--DDL commands on continuous aggregates +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL +); +\if :IS_DISTRIBUTED +SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); + table_name +------------ + conditions +(1 row) + +\else +SELECT table_name FROM create_hypertable('conditions', 'timec'); +\endif +-- schema tests +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +CREATE SCHEMA rename_schema; +GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (2,public,foo,t) +(1 row) + +\else +SELECT create_hypertable('foo', 'time'); +\endif +CREATE MATERIALIZED VIEW rename_test + WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1week', time), COUNT(data) + FROM foo + GROUP BY 1 WITH NO DATA; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | 
partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + public | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + rename_schema | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'rename_test' +\gset +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + rename_schema | rename_test | public | _partial_view_3 +(1 row) + +--alter direct view schema +SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | direct_view_schema | direct_view_name +------------------+----------------+-----------------------+------------------ + rename_schema | rename_test | _timescaledb_internal | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA rename_schema RENAME TO new_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; +ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; +SELECT 
user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + foo_name_schema | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA foo_name_schema RENAME TO rename_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | rename_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+--------------------+---------------------+------------------- + public | rename_c_aggregate | rename_schema | _partial_view_3 +(1 row) + +SELECT * FROM rename_c_aggregate; + time_bucket | count +-------------+------- +(0 rows) + +ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 +(1 row) + +--rename direct view +ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name 
+------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view +(1 row) + +-- drop_chunks tests +DROP TABLE conditions CASCADE; +DROP TABLE foo CASCADE; +psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_id + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), COUNT(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +-- Only refresh up to bucket 15 initially. Matches the old refresh +-- behavior that didn't materialize everything +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table without specifying +-- cont. 
aggregate view name explicitly +\set ON_ERROR_STOP 0 +SELECT drop_chunks(:'drop_chunks_mat_table', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +-- Refresh up to bucket 15 to match old materializer behavior +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +-- TRUNCATE test +-- Can truncate regular hypertables that have caggs +TRUNCATE drop_chunks_table_u; +\set ON_ERROR_STOP 0 +-- Can't truncate materialized hypertables directly +TRUNCATE :drop_chunks_mat_table_u; +psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +\set ON_ERROR_STOP 1 +-- Check that we don't interfere with TRUNCATE of normal table and +-- partitioned table +CREATE TABLE truncate (value int); +INSERT INTO truncate VALUES (1), (2); +TRUNCATE truncate; 
+SELECT * FROM truncate; + value +------- +(0 rows) + +CREATE TABLE truncate_partitioned (value int) + PARTITION BY RANGE(value); +CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned + FOR VALUES FROM (1) TO (3); +INSERT INTO truncate_partitioned VALUES (1), (2); +TRUNCATE truncate_partitioned; +SELECT * FROM truncate_partitioned; + value +------- +(0 rows) + +-- ALTER TABLE tests +\set ON_ERROR_STOP 0 +-- test a variety of ALTER TABLE statements +ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; +psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported +ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); +psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; +psql:include/cagg_ddl_common.sql:293: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; +psql:include/cagg_ddl_common.sql:294: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; +psql:include/cagg_ddl_common.sql:295: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; +psql:include/cagg_ddl_common.sql:296: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; +psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; +psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; +psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; +psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; +psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u NOT OF; +psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; +psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables +\set ON_ERROR_STOP 1 +ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; +ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT * FROM new_name; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +\set ON_ERROR_STOP 0 +-- no continuous aggregates on a continuous aggregate materialization table +CREATE MATERIALIZED VIEW new_name_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('6', time_bucket), COUNT("count") + FROM new_name + GROUP BY 1 WITH NO DATA; +psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, 
v2 float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (8,public,metrics,t) +(1 row) + +\else +SELECT create_hypertable('metrics','time'); +\endif +INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; +-- check expressions in view definition +CREATE MATERIALIZED VIEW cagg_expr + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1d', time) AS time, + 'Const'::text AS Const, + 4.3::numeric AS "numeric", + first(metrics,time), + CASE WHEN true THEN 'foo' ELSE 'bar' END, + COALESCE(NULL,'coalesce'), + avg(v1) + avg(v2) AS avg1, + avg(v1+v2) AS avg2 +FROM metrics +GROUP BY 1 WITH NO DATA; +CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); +SELECT * FROM cagg_expr ORDER BY time LIMIT 5; + time | const | numeric | first | case | coalesce | avg1 | avg2 +------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ + Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 +(5 rows) + +--test materialization of invalidation before drop +DROP TABLE IF EXISTS drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping +DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--dropping chunks will process the invalidations +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks 
+----------------------------------------------- + _timescaledb_internal._dist_hyper_10_13_chunk +(1 row) + +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 10 | 10 +(1 row) + +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; +CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); +--this will be seen after the drop its within the invalidation window and will be dropped +INSERT INTO drop_chunks_table VALUES (26, 100); +--this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh +--shows that the drop doesn't do more work than necessary +INSERT INTO drop_chunks_table VALUES (31, 200); +--move the time up to 39 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; +--the chunks and ranges we have thus far +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table'; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 +(3 rows) + +--the invalidation on 25 not yet seen +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 29 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--refresh to process the invalidations and then drop +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_14_chunk + _timescaledb_internal._dist_hyper_10_15_chunk +(2 rows) + +--new values on 25 now seen in view +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--earliest datapoint now in table +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 30 | 30 +(1 row) + +--we see the chunks row with the dropped flags set; +SELECT * FROM _timescaledb_catalog.chunk where dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+-------------------------+---------------------+---------+--------+----------- + 13 | 10 | _timescaledb_internal | _dist_hyper_10_13_chunk | | t | 0 | f + 14 | 10 | _timescaledb_internal | _dist_hyper_10_14_chunk | | t | 0 | f + 15 | 10 | _timescaledb_internal | _dist_hyper_10_15_chunk | | t | 0 | f +(3 rows) + +--still see data in the view +SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(4 rows) + +--no data but covers dropped chunks +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ +(0 rows) + +--recreate the dropped chunk +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--see data from recreated region +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ + 20 | 20 + 19 | 19 + 18 | 18 + 17 | 17 + 16 | 16 + 15 | 15 + 14 | 14 + 13 | 13 + 12 | 12 + 11 | 11 + 10 | 10 + 9 | 9 + 8 | 
8 + 7 | 7 + 6 | 6 + 5 | 5 + 4 | 4 + 3 | 3 + 2 | 2 + 1 | 1 + 0 | 0 +(21 rows) + +--should show chunk with old name and old ranges +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 +(4 rows) + +--We dropped everything up to the bucket starting at 30 and then +--inserted new data up to and including time 20. Therefore, the +--dropped data should stay the same as long as we only refresh +--buckets that have non-dropped data. +CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 39 + 30 | 200 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks('drop_chunks_view', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- Test that we cannot drop chunks when specifying materialized +-- hypertable +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_11_20_chunk | 0 | 100 +(1 row) + +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); +psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable +DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. +HINT: Try the operation on the continuous aggregate instead. +\set VERBOSITY terse +\set ON_ERROR_STOP 1 +----------------------------------------------------------------- +-- Test that refresh_continuous_aggregate on chunk will refresh, +-- but only in the regions covered by the show chunks. 
+----------------------------------------------------------------- +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(6 rows) + +-- Pick the second chunk as the one to drop +WITH numbered_chunks AS ( + SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer + FROM timescaledb_information.chunks + WHERE hypertable_name = 'drop_chunks_table' + ORDER BY 1 +) +SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer +FROM numbered_chunks +WHERE row_number = 2 \gset +-- There's data in the table for the chunk/range we will drop +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 +(10 rows) + +-- Make sure there is also data in the continuous aggregate +-- CARE: +-- Note that this behaviour of dropping the materialization table chunks and expecting a refresh +-- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over +-- distributed hypertables merge the invalidations the refresh region is updated in the distributed +-- case, which may be different than what happens in the normal hypertable case. 
The command was: +-- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 9 + 10 | 14 + 15 | 19 + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(9 rows) + +-- Drop the second chunk, to leave a gap in the data +\if :IS_DISTRIBUTED +CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); +DROP FOREIGN TABLE :chunk_to_drop; +\else +DROP TABLE :chunk_to_drop; +\endif +-- Verify that the second chunk is dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(5 rows) + +-- Data is no longer in the table but still in the view +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ +(0 rows) + +SELECT * FROM drop_chunks_view +WHERE time_bucket >= :range_start_integer +AND time_bucket < :range_end_integer +ORDER BY 1; + time_bucket | max +-------------+----- + 10 | 14 + 15 | 19 +(2 rows) + +-- Insert a large value in one of the chunks that will be dropped +INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); +-- Now refresh and drop the two adjecent chunks +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); +SELECT drop_chunks('drop_chunks_table', older_than=>30); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_13_chunk + _timescaledb_internal._dist_hyper_10_15_chunk +(2 rows) + +-- Verify that the chunks are dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(3 rows) + +-- The continuous aggregate should be refreshed in the regions covered +-- by the dropped chunks, but not in the "gap" region, i.e., the +-- region of the chunk that was dropped via DROP TABLE. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 100 + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(7 rows) + +-- Now refresh in the region of the first two dropped chunks +CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); +-- Aggregate data in the refreshed range should no longer exist since +-- the underlying data was dropped. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(5 rows) + +-------------------------------------------------------------------- +-- Check that we can create a materialized table in a tablespace. We +-- create one with tablespace and one without and compare them. 
+CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + pg_get_userbyid(relowner) AS user_view_owner, + relname AS mat_table, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, + direct_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, + partial_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +CREATE VIEW chunk_info AS +SELECT ht.schema_name, ht.table_name, relname AS chunk_name, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class c, + _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.chunk ch + WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; +CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS whatever_nid + FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) +\gset +\else +SELECT hypertable_id AS whatever_nid + FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) +\gset +\endif +SELECT set_integer_now_func('whatever', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW whatever_view_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW whatever_view_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +TABLESPACE tablespace1 AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; +CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); +CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace 
+-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +DROP MATERIALIZED VIEW whatever_view_1; +psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +DROP MATERIALIZED VIEW whatever_view_2; +psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +-- test bucket width expressions on integer hypertables +CREATE TABLE metrics_int2 ( + time int2 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int4 ( + time int4 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int8 ( + time int8 NOT NULL, + device_id int, + v1 float, + v2 float +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + create_distributed_hypertable +------------------------------- + (15,public,metrics_int2,t) + (16,public,metrics_int4,t) + (17,public,metrics_int8,t) +(3 rows) + +\else +SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); +\endif +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +$DIST$); +\endif +SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + set_integer_now_func +---------------------- + + + +(3 rows) + +-- width expression for int2 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint + 2::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int4 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" 
is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int8 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +\set ON_ERROR_STOP 0 +-- non-immutable expressions should be rejected +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +-- Test various ALTER MATERIALIZED VIEW statements. +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1 +WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | default_perm_user +mat_table | _materialized_hypertable_24 +mat_table_owner | default_perm_user +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | default_perm_user +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | default_perm_user +tablespace | + +\x off +-- This should not work since the target user has the wrong role, but +-- we test that the normal checks are done when changing the owner. 
+\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" +\set ON_ERROR_STOP 1 +-- Superuser can always change owner +SET ROLE :ROLE_CLUSTER_SUPERUSER; +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | test_role_1 +mat_table | _materialized_hypertable_24 +mat_table_owner | test_role_1 +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | test_role_1 +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | test_role_1 +tablespace | + +\x off +-- +-- Test drop continuous aggregate cases +-- +-- Issue: #2608 +-- +CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ + CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS + $BODY$ + SELECT 50; + $BODY$; +$DIST$); +\endif +CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); + create_distributed_hypertable +------------------------------- + (25,public,conditionsnm,t) +(1 row) + +\else +SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); +\endif +SELECT set_integer_now_func('conditionsnm', 'test_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO conditionsnm +SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; +-- Case 1: DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4; +psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +-- Case 2: DROP CASCADE should have similar behaviour as DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4 CASCADE; +psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +-- Case 3: require CASCADE in case of dependent object +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" +CREATE VIEW see_cagg as select * from conditionsnm_4; +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW conditionsnm_4; +psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +\set ON_ERROR_STOP 1 +-- Case 4: DROP CASCADE with dependency +DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; 
+psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +-- Test DROP SCHEMA CASCADE with continuous aggregates +-- +-- Issue: #2350 +-- +-- Case 1: DROP SCHEMA CASCADE +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); + create_distributed_hypertable +---------------------------------- + (29,test_schema,telemetry_raw,t) +(1 row) + +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); +\endif +CREATE MATERIALIZED VIEW test_schema.telemetry_1s + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'telemetry_1s'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +-- Case 2: DROP SCHEMA CASCADE with multiple caggs +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); + create_distributed_hypertable +---------------------------------- + (31,test_schema,telemetry_raw,t) +(1 row) + +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); +\endif +CREATE MATERIALIZED VIEW test_schema.cagg1 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +CREATE MATERIALIZED VIEW test_schema.cagg2 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME1", + partial_view_name as "PART_VIEW_NAME1", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg1'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema 
+-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal +(1 row) + +\gset +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME2", + partial_view_name as "PART_VIEW_NAME2", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg2'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +-- Check that we can rename a column of a materialized view and still +-- rebuild it after (#3051, #3405) +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (34,public,conditions,t) +(1 row) + +\else +SELECT create_hypertable('conditions', 'time'); +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", + format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", + format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" +FROM 
_timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'conditions_daily' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns('conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | t + avg | double precision | f +(3 rows) + +ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns(' conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | t + avg | double precision | f +(3 rows) + +-- This will rebuild the materialized view and should succeed. +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); +-- Refresh the continuous aggregate to check that it works after the +-- rename. 
+\set VERBOSITY verbose +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +\set VERBOSITY terse +-- +-- Indexes on continuous aggregate +-- +\set ON_ERROR_STOP 0 +-- unique indexes are not supported +CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); +psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes +-- concurrently index creation not supported +CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); +psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation +\set ON_ERROR_STOP 1 +CREATE INDEX index_avg ON conditions_daily (avg); +CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); +CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); +CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); +CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; +CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; +SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); + Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace +-----------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ + _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | + _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | + _timescaledb_internal.index_avg | {avg} | | | f | f | f | + _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | + _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | + _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | +(8 rows) + +-- #3696 assertion failure when referencing columns not present in result +CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (36,public,i3696,t) +(1 row) + +\else +SELECT table_name FROM create_hypertable('i3696','time'); +\endif +CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; +psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); +CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt + cnt2, bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 
'true'); +--TEST test with multiple settings on continuous aggregates -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +CREATE TABLE test_setting(time timestamptz not null, val numeric); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (39,public,test_setting,t) +(1 row) + +\else +SELECT create_hypertable('test_setting', 'time'); +\endif +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +INSERT INTO test_setting +SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only 
+-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +DELETE FROM test_setting WHERE val = 20; +--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +DROP MATERIALIZED VIEW test_setting_cagg; +psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only 
+-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +-- END TEST with multiple settings +-- Test View Target Entries that contain both aggrefs and Vars in the same expression +CREATE TABLE transactions +( + "time" timestamp with time zone NOT NULL, + dummy1 integer, + dummy2 integer, + dummy3 integer, + dummy4 integer, + dummy5 integer, + amount integer, + fiat_value integer +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (46,public,transactions,t) +(1 row) + +\else +SELECT create_hypertable('transactions', 'time'); +\endif +INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); +CREATE materialized view cashflows( + bucket, + amount, + cashflow, + cashflow2 +) WITH ( + timescaledb.continuous, + timescaledb.materialized_only = true +) AS +SELECT time_bucket ('1 day', time) AS bucket, + amount, + CASE + WHEN amount < 0 THEN (0 - sum(fiat_value)) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) +FROM transactions +GROUP BY bucket, amount; +psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" +SELECT h.table_name AS "MAT_TABLE_NAME", + partial_view_name AS "PART_VIEW_NAME", + direct_view_name AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cashflows' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. 
+\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" + View "_timescaledb_internal._direct_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"PART_VIEW_NAME" + View "_timescaledb_internal._partial_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, + CASE + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) + END AS cashflow, + transactions.amount + sum(transactions.fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; + +\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" + Table "_timescaledb_internal._materialized_hypertable_47" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +-----------+--------------------------+-----------+----------+---------+---------+--------------+------------- + bucket | timestamp with time zone | | not null | | plain | | + amount | integer | | | | plain | | + cashflow | bigint | | | | plain | | + cashflow2 | bigint | | | | plain | | +Indexes: + "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) + "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) +Triggers: + ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_47_52_chunk, + _timescaledb_internal._hyper_47_53_chunk + +\d+ 'cashflows' + View "public.cashflows" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_47.bucket, + _materialized_hypertable_47.amount, + _materialized_hypertable_47.cashflow, + _materialized_hypertable_47.cashflow2 + FROM _timescaledb_internal._materialized_hypertable_47; + +SELECT * FROM cashflows; + bucket | amount | cashflow | cashflow2 +------------------------------+--------+----------+----------- + Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 + Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 + Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 + Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 + Thu Nov 01 17:00:00 
2018 PDT | -1 | -10 | 9 + Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 +(6 rows) + +-- test cagg creation with named arguments in time_bucket +-- note that positional arguments cannot follow named arguments +-- 1. test named origin +-- 2. test named timezone +-- 3. test named ts +-- 4. test named bucket width +-- named origin +CREATE MATERIALIZED VIEW cagg_named_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named timezone +CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named ts +CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named bucket width +CREATE MATERIALIZED VIEW cagg_named_all WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and +-- using an INTERVAL for the end timestamp (issue #5534) +CREATE MATERIALIZED VIEW transactions_montly +WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) + FROM transactions +GROUP BY 1 +WITH NO DATA; +-- No rows +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +--------+-----+-----+----- +(0 rows) + +-- Refresh from beginning of the CAGG for 1 month +CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +TRUNCATE transactions_montly; +-- Partial refresh the CAGG from beginning to an specific timestamp +CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 +(1 row) + +-- Full refresh the CAGG +CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +-- Check set_chunk_time_interval on continuous aggregate +CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) +FROM transactions +GROUP BY 1 +WITH NO DATA; +SELECT 
set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); + set_chunk_time_interval +------------------------- + +(1 row) + +CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); +SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month' +FROM _timescaledb_catalog.dimension d + RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' +WHERE d.hypertable_id = ca.mat_hypertable_id; + ?column? +---------- + t +(1 row) + +-- Since #6077 CAggs are materialized only by default +DROP TABLE conditions CASCADE; +psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (54,public,conditions,t) +(1 row) + +\else +SELECT create_hypertable('conditions', 'time'); +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +-- Should return NO ROWS +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+--------+----- +(0 rows) + +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55 + WHERE 
_materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT conditions.location, + time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + avg(conditions.temperature) AS avg + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) + GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); + +-- Should return ROWS because now it is realtime +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- Should return ROWS because we refreshed it +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55; + +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_ddl_dist_ht-16.out b/tsl/test/expected/cagg_ddl_dist_ht-16.out new file mode 100644 index 00000000000..5652a51defe --- /dev/null +++ b/tsl/test/expected/cagg_ddl_dist_ht-16.out @@ -0,0 +1,2207 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------------+-----------------------+--------------+------------------+------------------- + db_cagg_ddl_dist_ht_1 | db_cagg_ddl_dist_ht_1 | t | t | t + db_cagg_ddl_dist_ht_2 | db_cagg_ddl_dist_ht_2 | t | t | t + db_cagg_ddl_dist_ht_3 | db_cagg_ddl_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\set IS_DISTRIBUTED TRUE +\ir include/cagg_ddl_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Set this variable to avoid using a hard-coded path each time query +-- results are compared +\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +Running distributed hypertable tests +\else +\echo 'Running local hypertable tests' +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +--DDL commands on continuous aggregates +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL +); +\if :IS_DISTRIBUTED +SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); + table_name +------------ + conditions +(1 row) + +\else +SELECT table_name FROM create_hypertable('conditions', 'timec'); +\endif +-- schema tests +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +CREATE SCHEMA rename_schema; +GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (2,public,foo,t) +(1 row) + +\else +SELECT create_hypertable('foo', 'time'); +\endif +CREATE MATERIALIZED VIEW rename_test + WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1week', time), COUNT(data) + FROM foo + GROUP BY 1 WITH NO DATA; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | 
partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + public | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+-----------------------+------------------- + rename_schema | rename_test | _timescaledb_internal | _partial_view_3 +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'rename_test' +\gset +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + rename_schema | rename_test | public | _partial_view_3 +(1 row) + +--alter direct view schema +SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | direct_view_schema | direct_view_name +------------------+----------------+-----------------------+------------------ + rename_schema | rename_test | _timescaledb_internal | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA rename_schema RENAME TO new_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 +(1 row) + +ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; +ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; +SELECT 
user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+----------------+---------------------+-------------------+--------------------+------------------ + new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + foo_name_schema | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | foo_name_schema | _partial_view_3 +(1 row) + +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +ALTER SCHEMA foo_name_schema RENAME TO rename_schema; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+----------------+---------------------+------------------- + public | rename_test | rename_schema | _partial_view_3 +(1 row) + +ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+--------------------+---------------------+------------------- + public | rename_c_aggregate | rename_schema | _partial_view_3 +(1 row) + +SELECT * FROM rename_c_aggregate; + time_bucket | count +-------------+------- +(0 rows) + +ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name +------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 +(1 row) + +--rename direct view +ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, + direct_view_schema, direct_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name 
+------------------+--------------------+---------------------+-------------------+--------------------+------------------ + public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view +(1 row) + +-- drop_chunks tests +DROP TABLE conditions CASCADE; +DROP TABLE foo CASCADE; +psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_id + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), COUNT(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +-- Only refresh up to bucket 15 initially. Matches the old refresh +-- behavior that didn't materialize everything +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table without specifying +-- cont. 
aggregate view name explicitly +\set ON_ERROR_STOP 0 +SELECT drop_chunks(:'drop_chunks_mat_table', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1 WITH NO DATA; +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +-- Refresh up to bucket 15 to match old materializer behavior +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +-- TRUNCATE test +-- Can truncate regular hypertables that have caggs +TRUNCATE drop_chunks_table_u; +\set ON_ERROR_STOP 0 +-- Can't truncate materialized hypertables directly +TRUNCATE :drop_chunks_mat_table_u; +psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +\set ON_ERROR_STOP 1 +-- Check that we don't interfere with TRUNCATE of normal table and +-- partitioned table +CREATE TABLE truncate (value int); +INSERT INTO truncate VALUES (1), (2); +TRUNCATE truncate; 
+SELECT * FROM truncate; + value +------- +(0 rows) + +CREATE TABLE truncate_partitioned (value int) + PARTITION BY RANGE(value); +CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned + FOR VALUES FROM (1) TO (3); +INSERT INTO truncate_partitioned VALUES (1), (2); +TRUNCATE truncate_partitioned; +SELECT * FROM truncate_partitioned; + value +------- +(0 rows) + +-- ALTER TABLE tests +\set ON_ERROR_STOP 0 +-- test a variety of ALTER TABLE statements +ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; +psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported +ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); +psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; +psql:include/cagg_ddl_common.sql:293: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; +psql:include/cagg_ddl_common.sql:294: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; +psql:include/cagg_ddl_common.sql:295: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; +psql:include/cagg_ddl_common.sql:296: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; +psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; +psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; +psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; +psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; +psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u NOT OF; +psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables +ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; +psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables +\set ON_ERROR_STOP 1 +ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; +ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET client_min_messages TO NOTICE; +SELECT * FROM new_name; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +\set ON_ERROR_STOP 0 +-- no continuous aggregates on a continuous aggregate materialization table +CREATE MATERIALIZED VIEW new_name_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('6', time_bucket), COUNT("count") + FROM new_name + GROUP BY 1 WITH NO DATA; +psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, 
v2 float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (8,public,metrics,t) +(1 row) + +\else +SELECT create_hypertable('metrics','time'); +\endif +INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; +-- check expressions in view definition +CREATE MATERIALIZED VIEW cagg_expr + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1d', time) AS time, + 'Const'::text AS Const, + 4.3::numeric AS "numeric", + first(metrics,time), + CASE WHEN true THEN 'foo' ELSE 'bar' END, + COALESCE(NULL,'coalesce'), + avg(v1) + avg(v2) AS avg1, + avg(v1+v2) AS avg2 +FROM metrics +GROUP BY 1 WITH NO DATA; +CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); +SELECT * FROM cagg_expr ORDER BY time LIMIT 5; + time | const | numeric | first | case | coalesce | avg1 | avg2 +------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ + Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 + Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 +(5 rows) + +--test materialization of invalidation before drop +DROP TABLE IF EXISTS drop_chunks_table CASCADE; +psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping +DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +\else +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +\endif +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW drop_chunks_view + WITH ( + timescaledb.continuous, + timescaledb.materialized_only=true + ) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1 WITH NO DATA; +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--dropping chunks will process the invalidations +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks 
+----------------------------------------------- + _timescaledb_internal._dist_hyper_10_13_chunk +(1 row) + +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 10 | 10 +(1 row) + +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; +CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); +--this will be seen after the drop its within the invalidation window and will be dropped +INSERT INTO drop_chunks_table VALUES (26, 100); +--this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh +--shows that the drop doesn't do more work than necessary +INSERT INTO drop_chunks_table VALUES (31, 200); +--move the time up to 39 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; +--the chunks and ranges we have thus far +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table'; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 +(3 rows) + +--the invalidation on 25 not yet seen +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 29 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--refresh to process the invalidations and then drop +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); +SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_14_chunk + _timescaledb_internal._dist_hyper_10_15_chunk +(2 rows) + +--new values on 25 now seen in view +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 35 + 30 | 34 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +--earliest datapoint now in table +SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; + time | data +------+------ + 30 | 30 +(1 row) + +--we see the chunks row with the dropped flags set; +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+-------------------------+---------------------+---------+--------+----------- + 13 | 10 | _timescaledb_internal | _dist_hyper_10_13_chunk | | t | 0 | f + 14 | 10 | _timescaledb_internal | _dist_hyper_10_14_chunk | | t | 0 | f + 15 | 10 | _timescaledb_internal | _dist_hyper_10_15_chunk | | t | 0 | f +(3 rows) + +--still see data in the view +SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(4 rows) + +--no data but covers dropped chunks +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ +(0 rows) + +--recreate the dropped chunk +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; +--see data from recreated region +SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; + time | data +------+------ + 20 | 20 + 19 | 19 + 18 | 18 + 17 
| 17 + 16 | 16 + 15 | 15 + 14 | 14 + 13 | 13 + 12 | 12 + 11 | 11 + 10 | 10 + 9 | 9 + 8 | 8 + 7 | 7 + 6 | 6 + 5 | 5 + 4 | 4 + 3 | 3 + 2 | 2 + 1 | 1 + 0 | 0 +(21 rows) + +--should show chunk with old name and old ranges +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 +(4 rows) + +--We dropped everything up to the bucket starting at 30 and then +--inserted new data up to and including time 20. Therefore, the +--dropped data should stay the same as long as we only refresh +--buckets that have non-dropped data. +CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); +SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; + time_bucket | max +-------------+----- + 35 | 39 + 30 | 200 + 25 | 100 + 20 | 24 + 15 | 19 + 10 | 14 +(6 rows) + +SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks('drop_chunks_view', + newer_than => -20, + verbose => true); +psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_17_chunk +(1 row) + +-- Test that we cannot drop chunks when specifying materialized +-- hypertable +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; + chunk_name | range_start_integer | range_end_integer +--------------------+---------------------+------------------- + _hyper_11_20_chunk | 0 | 100 +(1 row) + +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); +psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable +DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. +HINT: Try the operation on the continuous aggregate instead. +\set VERBOSITY terse +\set ON_ERROR_STOP 1 +----------------------------------------------------------------- +-- Test that refresh_continuous_aggregate on chunk will refresh, +-- but only in the regions covered by the show chunks. 
+----------------------------------------------------------------- +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_14_chunk | 10 | 20 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(6 rows) + +-- Pick the second chunk as the one to drop +WITH numbered_chunks AS ( + SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer + FROM timescaledb_information.chunks + WHERE hypertable_name = 'drop_chunks_table' + ORDER BY 1 +) +SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer +FROM numbered_chunks +WHERE row_number = 2 \gset +-- There's data in the table for the chunk/range we will drop +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 +(10 rows) + +-- Make sure there is also data in the continuous aggregate +-- CARE: +-- Note that this behaviour of dropping the materialization table chunks and expecting a refresh +-- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over +-- distributed hypertables merge the invalidations the refresh region is updated in the distributed +-- case, which may be different than what happens in the normal hypertable case. 
The command was: +-- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); +CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 9 + 10 | 14 + 15 | 19 + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(9 rows) + +-- Drop the second chunk, to leave a gap in the data +\if :IS_DISTRIBUTED +CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); +DROP FOREIGN TABLE :chunk_to_drop; +\else +DROP TABLE :chunk_to_drop; +\endif +-- Verify that the second chunk is dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_13_chunk | 0 | 10 + _dist_hyper_10_15_chunk | 20 | 30 + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(5 rows) + +-- Data is no longer in the table but still in the view +SELECT * FROM drop_chunks_table +WHERE time >= :range_start_integer +AND time < :range_end_integer +ORDER BY 1; + time | data +------+------ +(0 rows) + +SELECT * FROM drop_chunks_view +WHERE time_bucket >= :range_start_integer +AND time_bucket < :range_end_integer +ORDER BY 1; + time_bucket | max +-------------+----- + 10 | 14 + 15 | 19 +(2 rows) + +-- Insert a large value in one of the chunks that will be dropped +INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); +-- Now refresh and drop the two adjecent chunks +CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); +SELECT drop_chunks('drop_chunks_table', older_than=>30); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_13_chunk + _timescaledb_internal._dist_hyper_10_15_chunk +(2 rows) + +-- Verify that the chunks are dropped +SELECT chunk_name, range_start_integer, range_end_integer +FROM timescaledb_information.chunks +WHERE hypertable_name = 'drop_chunks_table' +ORDER BY 2,3; + chunk_name | range_start_integer | range_end_integer +-------------------------+---------------------+------------------- + _dist_hyper_10_16_chunk | 30 | 40 + _dist_hyper_10_18_chunk | 40 | 50 + _dist_hyper_10_19_chunk | 50 | 60 +(3 rows) + +-- The continuous aggregate should be refreshed in the regions covered +-- by the dropped chunks, but not in the "gap" region, i.e., the +-- region of the chunk that was dropped via DROP TABLE. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 0 | 4 + 5 | 100 + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(7 rows) + +-- Now refresh in the region of the first two dropped chunks +CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); +-- Aggregate data in the refreshed range should no longer exist since +-- the underlying data was dropped. +SELECT * FROM drop_chunks_view +ORDER BY 1; + time_bucket | max +-------------+----- + 20 | 20 + 30 | 200 + 35 | 39 + 45 | 500 + 50 | 500 +(5 rows) + +-------------------------------------------------------------------- +-- Check that we can create a materialized table in a tablespace. We +-- create one with tablespace and one without and compare them. 
+CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + pg_get_userbyid(relowner) AS user_view_owner, + relname AS mat_table, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, + direct_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, + partial_view, + (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +CREATE VIEW chunk_info AS +SELECT ht.schema_name, ht.table_name, relname AS chunk_name, + (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace + FROM pg_class c, + _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.chunk ch + WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; +CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); +\if :IS_DISTRIBUTED +SELECT hypertable_id AS whatever_nid + FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) +\gset +\else +SELECT hypertable_id AS whatever_nid + FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) +\gset +\endif +SELECT set_integer_now_func('whatever', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW whatever_view_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW whatever_view_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +TABLESPACE tablespace1 AS +SELECT time_bucket('5', time), COUNT(data) + FROM whatever GROUP BY 1 WITH NO DATA; +INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; +CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); +CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace +-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; +SELECT user_view, + mat_table, + cagg_info.tablespace AS mat_tablespace, + chunk_name, + chunk_info.tablespace AS chunk_tablespace + FROM cagg_info, chunk_info + WHERE mat_table::text = table_name + AND user_view::text LIKE 'whatever_view%'; + user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace 
+-----------------+-----------------------------+----------------+--------------------+------------------ + whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 + whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 +(2 rows) + +DROP MATERIALIZED VIEW whatever_view_1; +psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +DROP MATERIALIZED VIEW whatever_view_2; +psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +-- test bucket width expressions on integer hypertables +CREATE TABLE metrics_int2 ( + time int2 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int4 ( + time int4 NOT NULL, + device_id int, + v1 float, + v2 float +); +CREATE TABLE metrics_int8 ( + time int8 NOT NULL, + device_id int, + v1 float, + v2 float +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + create_distributed_hypertable +------------------------------- + (15,public,metrics_int2,t) + (16,public,metrics_int4,t) + (17,public,metrics_int8,t) +(3 rows) + +\else +SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); +\endif +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int2_now () + RETURNS int2 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int2 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int4_now () + RETURNS int4 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int4 +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int8_now () + RETURNS int8 + LANGUAGE SQL + STABLE + AS $$ + SELECT 10::int8 +$$; +$DIST$); +\endif +SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) +FROM ( + VALUES ('int2'), + ('int4'), + ('int8')) v (dt); + set_integer_now_func +---------------------- + + + +(3 rows) + +-- width expression for int2 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1::smallint + 2::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int4 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" 
is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +-- width expression for int8 hypertables +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date +DROP MATERIALIZED VIEW width_expr; +\set ON_ERROR_STOP 0 +-- non-immutable expresions should be rejected +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::smallint, time) +FROM metrics_int2 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int4 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function +CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(extract(year FROM now())::int, time) +FROM metrics_int8 +GROUP BY 1; +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +-- Test various ALTER MATERIALIZED VIEW statements. +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(1 + 2, time) +FROM metrics_int8 +GROUP BY 1 +WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | default_perm_user +mat_table | _materialized_hypertable_24 +mat_table_owner | default_perm_user +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | default_perm_user +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | default_perm_user +tablespace | + +\x off +-- This should not work since the target user has the wrong role, but +-- we test that the normal checks are done when changing the owner. 
+\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +psql:include/cagg_ddl_common.sql:826: ERROR: must be able to SET ROLE "test_role_1" +\set ON_ERROR_STOP 1 +-- Superuser can always change owner +SET ROLE :ROLE_CLUSTER_SUPERUSER; +ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; +-[ RECORD 1 ]------+--------------------------------------- +user_view | owner_check +user_view_owner | test_role_1 +mat_table | _materialized_hypertable_24 +mat_table_owner | test_role_1 +direct_view | _timescaledb_internal._direct_view_24 +direct_view_owner | test_role_1 +partial_view | _timescaledb_internal._partial_view_24 +partial_view_owner | test_role_1 +tablespace | + +\x off +-- +-- Test drop continuous aggregate cases +-- +-- Issue: #2608 +-- +CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ + CREATE OR REPLACE FUNCTION test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS + $BODY$ + SELECT 50; + $BODY$; +$DIST$); +\endif +CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); + create_distributed_hypertable +------------------------------- + (25,public,conditionsnm,t) +(1 row) + +\else +SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); +\endif +SELECT set_integer_now_func('conditionsnm', 'test_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO conditionsnm +SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; +-- Case 1: DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4; +psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +-- Case 2: DROP CASCADE should have similar behaviour as DROP +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" +DROP materialized view conditionsnm_4 CASCADE; +psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +-- Case 3: require CASCADE in case of dependent object +CREATE MATERIALIZED VIEW conditionsnm_4 +WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT time_bucket(7, time_int) as bucket, +SUM(value), COUNT(value) +FROM conditionsnm GROUP BY bucket WITH DATA; +psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" +CREATE VIEW see_cagg as select * from conditionsnm_4; +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW conditionsnm_4; +psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +\set ON_ERROR_STOP 1 +-- Case 4: DROP CASCADE with dependency +DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; 
+psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +-- Test DROP SCHEMA CASCADE with continuous aggregates +-- +-- Issue: #2350 +-- +-- Case 1: DROP SCHEMA CASCADE +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); + create_distributed_hypertable +---------------------------------- + (29,test_schema,telemetry_raw,t) +(1 row) + +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); +\endif +CREATE MATERIALIZED VIEW test_schema.telemetry_1s + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'telemetry_1s'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +-- Case 2: DROP SCHEMA CASCADE with multiple caggs +CREATE SCHEMA test_schema; +CREATE TABLE test_schema.telemetry_raw ( + ts TIMESTAMP WITH TIME ZONE NOT NULL, + value DOUBLE PRECISION +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); + create_distributed_hypertable +---------------------------------- + (31,test_schema,telemetry_raw,t) +(1 row) + +\else +SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); +\endif +CREATE MATERIALIZED VIEW test_schema.cagg1 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +CREATE MATERIALIZED VIEW test_schema.cagg2 + WITH (timescaledb.continuous, timescaledb.materialized_only=false) + AS +SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, + avg(value) + FROM test_schema.telemetry_raw + GROUP BY ts_1s WITH NO DATA; +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME1", + partial_view_name as "PART_VIEW_NAME1", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg1'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema 
+-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal +(1 row) + +\gset +SELECT ca.raw_hypertable_id, + h.schema_name, + h.table_name AS "MAT_TABLE_NAME2", + partial_view_name as "PART_VIEW_NAME2", + partial_view_schema +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cagg2'; + raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema +-------------------+-----------------------+-----------------------------+------------------+----------------------- + 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal +(1 row) + +\gset +DROP SCHEMA test_schema CASCADE; +psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; + count +------- + 0 +(1 row) + +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +-- Check that we can rename a column of a materialized view and still +-- rebuild it after (#3051, #3405) +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (34,public,conditions,t) +(1 row) + +\else +SELECT create_hypertable('conditions', 'time'); +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", + format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", + format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" +FROM 
_timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'conditions_daily' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns('conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + bucket | timestamp with time zone | t + avg | double precision | f +(3 rows) + +ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +SELECT * FROM test.show_columns(' conditions_daily'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | f + avg | double precision | f +(3 rows) + +SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); + Column | Type | NotNull +----------+--------------------------+--------- + location | text | f + time | timestamp with time zone | t + avg | double precision | f +(3 rows) + +-- This will rebuild the materialized view and should succeed. +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); +-- Refresh the continuous aggregate to check that it works after the +-- rename. 
+\set VERBOSITY verbose +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +\set VERBOSITY terse +-- +-- Indexes on continuous aggregate +-- +\set ON_ERROR_STOP 0 +-- unique indexes are not supported +CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); +psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes +-- concurrently index creation not supported +CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); +psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation +\set ON_ERROR_STOP 1 +CREATE INDEX index_avg ON conditions_daily (avg); +CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); +CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); +CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); +CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; +CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; +SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); + Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace +-----------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ + _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | + _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | + _timescaledb_internal.index_avg | {avg} | | | f | f | f | + _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | + _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | + _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | + _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | +(8 rows) + +-- #3696 assertion failure when referencing columns not present in result +CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (36,public,i3696,t) +(1 row) + +\else +SELECT table_name FROM create_hypertable('i3696','time'); +\endif +CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; +psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); +CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket + FROM i3696 GROUP BY cnt + cnt2, bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 
'true'); +--TEST test with multiple settings on continuous aggregates -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +CREATE TABLE test_setting(time timestamptz not null, val numeric); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (39,public,test_setting,t) +(1 row) + +\else +SELECT create_hypertable('test_setting', 'time'); +\endif +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +INSERT INTO test_setting +SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only 
+-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +DELETE FROM test_setting WHERE val = 20; +--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- +-- test for materialized_only + compress combinations (real time aggs enabled initially) +DROP MATERIALIZED VIEW test_setting_cagg; +psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) +AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; +psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" +CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +--this row is not in the materialized result --- +INSERT INTO test_setting VALUES( '2020-11-01', 20); +--try out 2 settings here -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +--now set it back to false -- +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); +psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | t | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only +-------------------+---------------------+------------------- + test_setting_cagg | f | f +(1 row) + +--count should return additional data since we have real time aggs on +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 21 +(1 row) + +ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); +SELECT view_name, compression_enabled, materialized_only +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_setting_cagg'; + view_name | compression_enabled | materialized_only 
+-------------------+---------------------+------------------- + test_setting_cagg | f | t +(1 row) + +--real time aggs is off now , should return 20 -- +SELECT count(*) from test_setting_cagg ORDER BY 1; + count +------- + 20 +(1 row) + +-- END TEST with multiple settings +-- Test View Target Entries that contain both aggrefs and Vars in the same expression +CREATE TABLE transactions +( + "time" timestamp with time zone NOT NULL, + dummy1 integer, + dummy2 integer, + dummy3 integer, + dummy4 integer, + dummy5 integer, + amount integer, + fiat_value integer +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (46,public,transactions,t) +(1 row) + +\else +SELECT create_hypertable('transactions', 'time'); +\endif +INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); +CREATE materialized view cashflows( + bucket, + amount, + cashflow, + cashflow2 +) WITH ( + timescaledb.continuous, + timescaledb.materialized_only = true +) AS +SELECT time_bucket ('1 day', time) AS bucket, + amount, + CASE + WHEN amount < 0 THEN (0 - sum(fiat_value)) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) +FROM transactions +GROUP BY bucket, amount; +psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" +SELECT h.table_name AS "MAT_TABLE_NAME", + partial_view_name AS "PART_VIEW_NAME", + direct_view_name AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cashflows' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. 
+\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" + View "_timescaledb_internal._direct_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, + amount, + CASE + WHEN amount < 0 THEN 0 - sum(fiat_value) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount; + +\d+ "_timescaledb_internal".:"PART_VIEW_NAME" + View "_timescaledb_internal._partial_view_47" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, + amount, + CASE + WHEN amount < 0 THEN 0 - sum(fiat_value) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount; + +\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" + Table "_timescaledb_internal._materialized_hypertable_47" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +-----------+--------------------------+-----------+----------+---------+---------+--------------+------------- + bucket | timestamp with time zone | | not null | | plain | | + amount | integer | | | | plain | | + cashflow | bigint | | | | plain | | + cashflow2 | bigint | | | | plain | | +Indexes: + "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) + "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) +Triggers: + ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_47_52_chunk, + _timescaledb_internal._hyper_47_53_chunk + +\d+ 'cashflows' + View "public.cashflows" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT bucket, + amount, + cashflow, + cashflow2 + FROM _timescaledb_internal._materialized_hypertable_47; + +SELECT * FROM cashflows; + bucket | amount | cashflow | cashflow2 +------------------------------+--------+----------+----------- + Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 + Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 + Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 + Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 + Thu Nov 01 17:00:00 2018 PDT | -1 | -10 | 9 + Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 +(6 rows) + +-- test cagg creation with named arguments in time_bucket +-- note that positional arguments cannot follow named arguments +-- 1. test named origin +-- 2. test named timezone +-- 3. test named ts +-- 4. 
test named bucket width +-- named origin +CREATE MATERIALIZED VIEW cagg_named_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named timezone +CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named ts +CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- named bucket width +CREATE MATERIALIZED VIEW cagg_named_all WITH +(timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, +avg(amount) as avg_amount +FROM transactions GROUP BY 1 WITH NO DATA; +-- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and +-- using an INTERVAL for the end timestamp (issue #5534) +CREATE MATERIALIZED VIEW transactions_montly +WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) + FROM transactions +GROUP BY 1 +WITH NO DATA; +-- No rows +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +--------+-----+-----+----- +(0 rows) + +-- Refresh from beginning of the CAGG for 1 month +CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +TRUNCATE transactions_montly; +-- Partial refresh the CAGG from beginning to an specific timestamp +CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 +(1 row) + +-- Full refresh the CAGG +CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); +SELECT * FROM transactions_montly ORDER BY bucket; + bucket | sum | max | min +------------------------------+-----+-----+----- + Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 + Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 +(2 rows) + +-- Check set_chunk_time_interval on continuous aggregate +CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT time_bucket(INTERVAL '1 month', time) AS bucket, + SUM(fiat_value), + MAX(fiat_value), + MIN(fiat_value) +FROM transactions +GROUP BY 1 +WITH NO DATA; +SELECT set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); + set_chunk_time_interval +------------------------- + +(1 row) + +CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); +SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 
month' +FROM _timescaledb_catalog.dimension d + RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' +WHERE d.hypertable_id = ca.mat_hypertable_id; + ?column? +---------- + t +(1 row) + +-- Since #6077 CAggs are materialized only by default +DROP TABLE conditions CASCADE; +psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL +); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (54,public,conditions,t) +(1 row) + +\else +SELECT create_hypertable('conditions', 'time'); +\endif +INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); +INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); +INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); +INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); +INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); +INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); +INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); +INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); +CREATE MATERIALIZED VIEW conditions_daily +WITH (timescaledb.continuous) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature) + FROM conditions +GROUP BY location, bucket +WITH NO DATA; +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT location, + bucket, + avg + FROM _timescaledb_internal._materialized_hypertable_55; + +-- Should return NO ROWS +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+--------+----- +(0 rows) + +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg + FROM _timescaledb_internal._materialized_hypertable_55 + WHERE _materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT conditions.location, + time_bucket('@ 1 day'::interval, conditions."time") AS bucket, + avg(conditions.temperature) AS avg + FROM conditions + WHERE conditions."time" >= 
COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) + GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); + +-- Should return ROWS because now it is realtime +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- Should return ROWS because we refreshed it +ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); +\d+ conditions_daily + View "public.conditions_daily" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + location | text | | | | extended | + bucket | timestamp with time zone | | | | plain | + avg | double precision | | | | plain | +View definition: + SELECT location, + bucket, + avg + FROM _timescaledb_internal._materialized_hypertable_55; + +CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); +SELECT * FROM conditions_daily ORDER BY bucket, avg; + location | bucket | avg +----------+------------------------------+----- + SFO | Sun Dec 31 16:00:00 2017 PST | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 + por | Mon Jan 01 16:00:00 2018 PST | 100 + NYC | Wed Oct 31 17:00:00 2018 PDT | 65 + NYC | Thu Nov 01 17:00:00 2018 PDT | 15 +(6 rows) + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_errors_deprecated.out b/tsl/test/expected/cagg_errors_deprecated-13.out similarity index 97% rename from tsl/test/expected/cagg_errors_deprecated.out rename to tsl/test/expected/cagg_errors_deprecated-13.out index 29131835628..d504e9687b4 100644 --- a/tsl/test/expected/cagg_errors_deprecated.out +++ b/tsl/test/expected/cagg_errors_deprecated-13.out @@ -145,6 +145,8 @@ from conditions group by time_bucket('1week', timec) , location WITH NO DATA; ERROR: aggregates which are not parallelizable are not supported ; +-- Starting on PG16 this test will pass because array_agg is parallel safe +-- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS Select sum(humidity), avg(temperature), array_agg(location) @@ -158,6 +160,8 @@ CREATE AGGREGATE newavg ( finalfunc = int8_avg, initcond1 = '{0,0}' ); +DROP MATERIALIZED VIEW IF EXISTS mat_m1; +NOTICE: materialized view "mat_m1" does not exist, skipping CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS Select sum(humidity), newavg(temperature::int4) @@ -531,6 +535,11 @@ CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, time SELECT time_bucket('1 day', time), COUNT(time) FROM measurements GROUP BY 1 WITH NO DATA; +SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) 
+WHERE user_view_name = 'measurements_summary' +\gset -- First test that add_job checks the config. It is currently possible -- to add non-custom jobs using the add_job function so we need to -- test that the function actually checks the config parameters. These @@ -548,14 +557,14 @@ SELECT add_job( '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, '1 hour'::interval, check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": 11}'); + config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); ERROR: invalid input syntax for type interval: "1 fortnight" -- ... this one because it has a bad value for end_offset SELECT add_job( '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, '1 hour'::interval, check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": 11}'); + config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); ERROR: invalid input syntax for type interval: "chicken" \set ON_ERROR_STOP 1 SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id @@ -589,10 +598,10 @@ timezone | SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); ERROR: could not find "mat_hypertable_id" in config for job SELECT alter_job(:job_id, - config => '{"mat_hypertable_id": 11, "end_offset": "chicken", "start_offset": "1 fortnights"}'); + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); ERROR: invalid input syntax for type interval: "1 fortnights" SELECT alter_job(:job_id, - config => '{"mat_hypertable_id": 11, "end_offset": "chicken", "start_offset": "1 week"}'); + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); ERROR: invalid input syntax for type interval: "chicken" \set ON_ERROR_STOP 1 DROP TABLE measurements CASCADE; diff --git a/tsl/test/expected/cagg_errors_deprecated-14.out b/tsl/test/expected/cagg_errors_deprecated-14.out new file mode 100644 index 00000000000..d504e9687b4 --- /dev/null +++ b/tsl/test/expected/cagg_errors_deprecated-14.out @@ -0,0 +1,714 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\set ON_ERROR_STOP 0 +\set VERBOSITY default +--negative tests for query validation +create table mat_t1( a integer, b integer,c TEXT); +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) +as +select location , min(temperature) +from conditions +group by time_bucket('1d', timec), location WITH NO DATA; +ERROR: unrecognized parameter "timescaledb.myfill" +--valid PG option +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) +as +select * from conditions , mat_t1 WITH NO DATA; +ERROR: unsupported combination of storage parameters +DETAIL: A continuous aggregate does not support standard storage parameters. +HINT: Use only parameters with the "timescaledb." prefix when creating a continuous aggregate. +--non-hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select a, count(*) from mat_t1 +group by a WITH NO DATA; +ERROR: table "mat_t1" is not a hypertable +-- no group by +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select count(*) from conditions WITH NO DATA; +ERROR: invalid continuous aggregate query +HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. +-- no time_bucket in group by +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select count(*) from conditions group by location WITH NO DATA; +ERROR: continuous aggregate view must include a valid time bucket function +-- with valid query in a CTE +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +with m1 as ( +Select location, count(*) from conditions + group by time_bucket('1week', timec) , location) +select * from m1 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: CTEs, subqueries and set-returning functions are not supported by continuous aggregates. +--with DISTINCT ON +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as + select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: DISTINCT / DISTINCT ON queries are not supported by continuous aggregates. 
+--aggregate with DISTINCT +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), + count(location) , sum(distinct temperature) from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +--aggregate with FILTER +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), + sum(temperature) filter ( where humidity > 20 ) from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +-- aggregate with filter in having clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), max(temperature) +from conditions + group by time_bucket('1week', timec) , location + having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +-- time_bucket on non partitioning column of hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timemeasure) , location WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +--time_bucket on expression +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +--multiple time_bucket functions +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; +ERROR: continuous aggregate view cannot contain multiple time bucket functions +--time_bucket using additional args +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; +ERROR: continuous aggregate view must include a valid time bucket function +--time_bucket using non-const for first argument +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket( timeinterval, timec) , location WITH NO DATA; +ERROR: only immutable expressions allowed in time bucket function +HINT: Use an immutable expression as first argument to the time bucket function. 
+-- ordered set aggr +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select mode() within group( order by humidity) +from conditions + group by time_bucket('1week', timec) WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +--window function +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select avg(temperature) over( order by humidity) +from conditions + WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: Window functions are not supported by continuous aggregates. +--aggregate without combine function +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select json_agg(location) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- Starting on PG16 this test will pass because array_agg is parallel safe +-- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature), array_agg(location) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- userdefined aggregate without combine function +CREATE AGGREGATE newavg ( + sfunc = int4_avg_accum, basetype = int4, stype = _int8, + finalfunc = int8_avg, + initcond1 = '{0,0}' +); +DROP MATERIALIZED VIEW IF EXISTS mat_m1; +NOTICE: materialized view "mat_m1" does not exist, skipping +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), newavg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- using subqueries +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from +( select humidity, temperature, location, timec +from conditions ) q + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: invalid continuous aggregate view +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +select * from +( Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location ) q WITH NO DATA; +ERROR: invalid continuous aggregate query +HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. +--using limit /limit offset +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +limit 10 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. 
+CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +offset 10 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. +--using ORDER BY in view defintion +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +ORDER BY 1 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: ORDER BY is not supported in queries defining continuous aggregates. +HINT: Use ORDER BY clauses in SELECTS from the continuous aggregate view instead. +--using FETCH +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +fetch first 10 rows only WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. +--using locking clauses FOR clause +--all should be disabled. we cannot guarntee locks on the hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR KEY SHARE WITH NO DATA; +ERROR: FOR KEY SHARE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR SHARE WITH NO DATA; +ERROR: FOR SHARE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR UPDATE WITH NO DATA; +ERROR: FOR UPDATE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR NO KEY UPDATE WITH NO DATA; +ERROR: FOR NO KEY UPDATE is not allowed with GROUP BY clause +--tablesample clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions tablesample bernoulli(0.2) + group by time_bucket('1week', timec) , location + WITH NO DATA; +ERROR: invalid continuous aggregate view +-- ONLY in from clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from ONLY 
conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: invalid continuous aggregate view +--grouping sets and variants +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates +HINT: Define multiple continuous aggregates with different grouping levels. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions +group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates +HINT: Define multiple continuous aggregates with different grouping levels. +--NO immutable functions -- check all clauses +CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' + STABLE AS 'SELECT $1 + 10'; +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), max(timec + INTERVAL '1h') +from conditions +group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), min(location) +from conditions +group by time_bucket('1week', timec) +having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( test_stablefunc(humidity::int) ), min(location) +from conditions +group by time_bucket('1week', timec) WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( temperature ), min(location) +from conditions +group by time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. 
+-- Should use CREATE MATERIALIZED VIEW to create continuous aggregates +CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS +SELECT time_bucket('1 week', timec) + FROM conditions +GROUP BY time_bucket('1 week', timec); +ERROR: cannot create continuous aggregate with CREATE VIEW +HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. +-- row security on table +create table rowsec_tab( a bigint, b integer, c integer); +select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + rowsec_tab +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; +SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +alter table rowsec_tab ENABLE ROW LEVEL SECURITY; +create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( b), min(c) +from rowsec_tab +group by time_bucket('1', a) WITH NO DATA; +ERROR: cannot create continuous aggregate on hypertable with row security +-- cagg on cagg not allowed +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +SELECT time_bucket('1 day', timec) AS bucket + FROM conditions +GROUP BY time_bucket('1 day', timec); +NOTICE: continuous aggregate "mat_m1" is already up-to-date +CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('1 week', bucket) AS bucket + FROM mat_m1 +GROUP BY time_bucket('1 week', bucket); +ERROR: old format of continuous aggregate is not supported +HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
+drop table conditions cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view mat_m1 +drop cascades to view _timescaledb_internal._partial_view_3 +drop cascades to view _timescaledb_internal._direct_view_3 +--negative tests for WITH options +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec) WITH NO DATA; +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_with_test' +\gset +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); +ERROR: cannot alter create_group_indexes option for continuous aggregates +ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); +ERROR: cannot alter create_group_indexes option for continuous aggregates +ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; +ERROR: cannot alter only SET options of a continuous aggregate +\set ON_ERROR_STOP 1 +\set VERBOSITY terse +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 3 other objects +--test WITH using a hypertable with an integer time dimension +CREATE TABLE conditions ( + timec SMALLINT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test_s'); + set_integer_now_func +---------------------- + +(1 row) + +\set ON_ERROR_STOP 0 +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +ALTER TABLE conditions ALTER timec type int; +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +\set ON_ERROR_STOP 1 +DROP TABLE conditions cascade; +NOTICE: drop cascades to 3 other objects +CREATE TABLE conditions ( + timec BIGINT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test_b'); + set_integer_now_func +---------------------- + +(1 row) + +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by 1 WITH NO DATA; +-- custom time partition functions are not supported with invalidations +CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT + AS $$ SELECT length($1)::BIGINT $$ + LANGUAGE SQL IMMUTABLE; +CREATE TABLE text_time(time TEXT); + SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (10,public,text_time,t) +(1 row) + +\set VERBOSITY default +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW text_view + WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) + AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) + FROM text_time + GROUP BY 1 WITH NO DATA; +ERROR: custom partitioning functions not supported with continuous aggregates +\set ON_ERROR_STOP 1 +-- Check that we get an error when mixing normal materialized views +-- and continuous aggregates. +CREATE MATERIALIZED VIEW normal_mat_view AS +SELECT time_bucket('5', text_part_func(time)), COUNT(time) + FROM text_time +GROUP BY 1 WITH NO DATA; +\set VERBOSITY terse +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; +ERROR: mixing continuous aggregates and other objects not allowed +\set ON_ERROR_STOP 1 +DROP TABLE text_time CASCADE; +NOTICE: drop cascades to materialized view normal_mat_view +CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); +SELECT create_hypertable('measurements', 'time'); + create_hypertable +---------------------------- + (11,public,measurements,t) +(1 row) + +INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); +-- Add a continuous aggregate on the measurements table and a policy +-- to be able to test error cases for the add_job function. 
+CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS +SELECT time_bucket('1 day', time), COUNT(time) + FROM measurements +GROUP BY 1 WITH NO DATA; +SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'measurements_summary' +\gset +-- First test that add_job checks the config. It is currently possible +-- to add non-custom jobs using the add_job function so we need to +-- test that the function actually checks the config parameters. These +-- should all generate errors, for different reasons... +\set ON_ERROR_STOP 0 +-- ... this one because it is missing a field. +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => '{"end_offset": null, "start_offset": null}'); +ERROR: could not find "mat_hypertable_id" in config for job +-- ... this one because it has a bad value for start_offset +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); +ERROR: invalid input syntax for type interval: "1 fortnight" +-- ... this one because it has a bad value for end_offset +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); +ERROR: invalid input syntax for type interval: "chicken" +\set ON_ERROR_STOP 1 +SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id +\gset +\x on +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; +-[ RECORD 1 ]-----+-------------------------------------------------------------------- +id | 1000 +application_name | Refresh Continuous Aggregate Policy [1000] +schedule_interval | @ 1 hour +max_runtime | @ 0 +max_retries | -1 +retry_period | @ 1 hour +proc_schema | _timescaledb_functions +proc_name | policy_refresh_continuous_aggregate +owner | default_perm_user +scheduled | t +fixed_schedule | f +initial_start | +hypertable_id | 12 +config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 12} +check_schema | _timescaledb_functions +check_name | policy_refresh_continuous_aggregate_check +timezone | + +\x off +-- These are all weird values for the parameters for the continuous +-- aggregate jobs and should generate an error. Since the config will +-- be replaced, we will also generate error for missing arguments. 
+\set ON_ERROR_STOP 0 +SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); +ERROR: could not find "mat_hypertable_id" in config for job +SELECT alter_job(:job_id, + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); +ERROR: invalid input syntax for type interval: "1 fortnights" +SELECT alter_job(:job_id, + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); +ERROR: invalid input syntax for type interval: "chicken" +\set ON_ERROR_STOP 1 +DROP TABLE measurements CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 3 other objects +-- test handling of invalid mat_hypertable_id +create table i2980(time timestamptz not null); +select create_hypertable('i2980','time'); + create_hypertable +--------------------- + (13,public,i2980,t) +(1 row) + +create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; +NOTICE: continuous aggregate "i2980_cagg" is already up-to-date +select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset +\set ON_ERROR_STOP 0 +select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); +ERROR: configuration materialization hypertable id 1000 not found +--test creating continuous aggregate with compression enabled -- +CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) +AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; +ERROR: cannot enable compression while creating a continuous aggregate +--this one succeeds +CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; +NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date +--now enable compression with invalid parameters +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, +timescaledb.compress_segmentby = 'bucket'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot use column "bucket" for both ordering and segmenting +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, +timescaledb.compress_orderby = 'bucket'); +--enable compression and test re-enabling compression +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); +NOTICE: defaulting compress_orderby to bucket +insert into i2980 select now(); +call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); +SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_3_chunk +(1 row) + +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); +ERROR: cannot change configuration on already compressed chunks +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot change configuration on already compressed chunks +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot change configuration on 
already compressed chunks +--Errors with compression policy on caggs-- +select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; + job_id +-------- + 1002 +(1 row) + +SELECT add_compression_policy('i2980_cagg', '8 day'::interval); +ERROR: compression not enabled on continuous aggregate "i2980_cagg" +ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); +NOTICE: defaulting compress_orderby to time_bucket +SELECT add_compression_policy('i2980_cagg', '8 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg +SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); +ERROR: function add_continuous_aggregate_policy(unknown, interval, interval) does not exist at character 8 +SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 +SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 +SELECT add_compression_policy('i2980_cagg2', '3'::integer); +ERROR: unsupported compress_after argument type, expected type : interval +SELECT add_compression_policy('i2980_cagg2', 13::integer); +ERROR: unsupported compress_after argument type, expected type : interval +SELECT materialization_hypertable_schema || '.' || materialization_hypertable_name AS "MAT_TABLE_NAME" +FROM timescaledb_information.continuous_aggregates +WHERE view_name = 'i2980_cagg2' +\gset +SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); +ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_15" +--TEST compressing cagg chunks without enabling compression +SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; + count +------- + 1 +(1 row) + +ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); +SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; +ERROR: compression not enabled on "i2980_cagg2" +-- test error handling when trying to create cagg on internal hypertable +CREATE TABLE comp_ht_test(time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('comp_ht_test','time'); + table_name +-------------- + comp_ht_test +(1 row) + +ALTER TABLE comp_ht_test SET (timescaledb.compress); +SELECT + format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" +FROM + _timescaledb_catalog.hypertable ht + INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id + AND uncompress.table_name = 'comp_ht_test') \gset +CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; +ERROR: hypertable is an internal compressed hypertable diff --git a/tsl/test/expected/cagg_errors_deprecated-15.out b/tsl/test/expected/cagg_errors_deprecated-15.out new file mode 100644 index 00000000000..d504e9687b4 --- /dev/null +++ b/tsl/test/expected/cagg_errors_deprecated-15.out @@ -0,0 +1,714 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +\set VERBOSITY default +--negative tests for query validation +create table mat_t1( a integer, b integer,c TEXT); +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) +as +select location , min(temperature) +from conditions +group by time_bucket('1d', timec), location WITH NO DATA; +ERROR: unrecognized parameter "timescaledb.myfill" +--valid PG option +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) +as +select * from conditions , mat_t1 WITH NO DATA; +ERROR: unsupported combination of storage parameters +DETAIL: A continuous aggregate does not support standard storage parameters. +HINT: Use only parameters with the "timescaledb." prefix when creating a continuous aggregate. +--non-hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select a, count(*) from mat_t1 +group by a WITH NO DATA; +ERROR: table "mat_t1" is not a hypertable +-- no group by +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select count(*) from conditions WITH NO DATA; +ERROR: invalid continuous aggregate query +HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. +-- no time_bucket in group by +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select count(*) from conditions group by location WITH NO DATA; +ERROR: continuous aggregate view must include a valid time bucket function +-- with valid query in a CTE +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +with m1 as ( +Select location, count(*) from conditions + group by time_bucket('1week', timec) , location) +select * from m1 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: CTEs, subqueries and set-returning functions are not supported by continuous aggregates. +--with DISTINCT ON +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as + select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: DISTINCT / DISTINCT ON queries are not supported by continuous aggregates. 
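-- For contrast with the rejected definitions above (an illustrative sketch, not part of the
-- captured expected output; mat_ok is a hypothetical name): a definition passes these checks
-- when it has a parallelizable aggregate and a GROUP BY that includes time_bucket() on the
-- hypertable's time dimension, e.g.
--   CREATE MATERIALIZED VIEW mat_ok WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false)
--   AS SELECT time_bucket('1week', timec) AS bucket, location, min(temperature)
--   FROM conditions GROUP BY 1, 2 WITH NO DATA;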
+--aggregate with DISTINCT +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), + count(location) , sum(distinct temperature) from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +--aggregate with FILTER +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), + sum(temperature) filter ( where humidity > 20 ) from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +-- aggregate with filter in having clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), max(temperature) +from conditions + group by time_bucket('1week', timec) , location + having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +-- time_bucket on non partitioning column of hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timemeasure) , location WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +--time_bucket on expression +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +--multiple time_bucket functions +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; +ERROR: continuous aggregate view cannot contain multiple time bucket functions +--time_bucket using additional args +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; +ERROR: continuous aggregate view must include a valid time bucket function +--time_bucket using non-const for first argument +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket( timeinterval, timec) , location WITH NO DATA; +ERROR: only immutable expressions allowed in time bucket function +HINT: Use an immutable expression as first argument to the time bucket function. 
+-- ordered set aggr +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select mode() within group( order by humidity) +from conditions + group by time_bucket('1week', timec) WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +--window function +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select avg(temperature) over( order by humidity) +from conditions + WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: Window functions are not supported by continuous aggregates. +--aggregate without combine function +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select json_agg(location) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- Starting on PG16 this test will pass because array_agg is parallel safe +-- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature), array_agg(location) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- userdefined aggregate without combine function +CREATE AGGREGATE newavg ( + sfunc = int4_avg_accum, basetype = int4, stype = _int8, + finalfunc = int8_avg, + initcond1 = '{0,0}' +); +DROP MATERIALIZED VIEW IF EXISTS mat_m1; +NOTICE: materialized view "mat_m1" does not exist, skipping +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), newavg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- using subqueries +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from +( select humidity, temperature, location, timec +from conditions ) q + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: invalid continuous aggregate view +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +select * from +( Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location ) q WITH NO DATA; +ERROR: invalid continuous aggregate query +HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. +--using limit /limit offset +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +limit 10 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. 
+CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +offset 10 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. +--using ORDER BY in view definition +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +ORDER BY 1 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: ORDER BY is not supported in queries defining continuous aggregates. +HINT: Use ORDER BY clauses in SELECTS from the continuous aggregate view instead. +--using FETCH +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +fetch first 10 rows only WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. +--using locking clauses FOR clause +--all should be disabled. we cannot guarantee locks on the hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR KEY SHARE WITH NO DATA; +ERROR: FOR KEY SHARE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR SHARE WITH NO DATA; +ERROR: FOR SHARE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR UPDATE WITH NO DATA; +ERROR: FOR UPDATE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR NO KEY UPDATE WITH NO DATA; +ERROR: FOR NO KEY UPDATE is not allowed with GROUP BY clause +--tablesample clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions tablesample bernoulli(0.2) + group by time_bucket('1week', timec) , location + WITH NO DATA; +ERROR: invalid continuous aggregate view +-- ONLY in from clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from ONLY 
conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: invalid continuous aggregate view +--grouping sets and variants +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates +HINT: Define multiple continuous aggregates with different grouping levels. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions +group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates +HINT: Define multiple continuous aggregates with different grouping levels. +--NO immutable functions -- check all clauses +CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' + STABLE AS 'SELECT $1 + 10'; +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), max(timec + INTERVAL '1h') +from conditions +group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), min(location) +from conditions +group by time_bucket('1week', timec) +having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( test_stablefunc(humidity::int) ), min(location) +from conditions +group by time_bucket('1week', timec) WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( temperature ), min(location) +from conditions +group by time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. 
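-- Note on the four failures above: the offending expressions are STABLE rather than IMMUTABLE.
-- For example, timestamptz + interval depends on the session TimeZone setting, and
-- test_stablefunc() is explicitly declared STABLE, so neither may appear in a continuous
-- aggregate definition.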
+-- Should use CREATE MATERIALIZED VIEW to create continuous aggregates +CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS +SELECT time_bucket('1 week', timec) + FROM conditions +GROUP BY time_bucket('1 week', timec); +ERROR: cannot create continuous aggregate with CREATE VIEW +HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. +-- row security on table +create table rowsec_tab( a bigint, b integer, c integer); +select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + rowsec_tab +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; +SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +alter table rowsec_tab ENABLE ROW LEVEL SECURITY; +create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( b), min(c) +from rowsec_tab +group by time_bucket('1', a) WITH NO DATA; +ERROR: cannot create continuous aggregate on hypertable with row security +-- cagg on cagg not allowed +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +SELECT time_bucket('1 day', timec) AS bucket + FROM conditions +GROUP BY time_bucket('1 day', timec); +NOTICE: continuous aggregate "mat_m1" is already up-to-date +CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('1 week', bucket) AS bucket + FROM mat_m1 +GROUP BY time_bucket('1 week', bucket); +ERROR: old format of continuous aggregate is not supported +HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
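-- A sketch based on the HINT above (not captured output): once the base aggregate is migrated
-- to the current finalized format with
--   CALL cagg_migrate('public.mat_m1');
-- a hierarchical definition like mat_m2_on_mat_m1 is expected to be accepted, since continuous
-- aggregates on continuous aggregates require the finalized format.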
+drop table conditions cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view mat_m1 +drop cascades to view _timescaledb_internal._partial_view_3 +drop cascades to view _timescaledb_internal._direct_view_3 +--negative tests for WITH options +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec) WITH NO DATA; +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_with_test' +\gset +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); +ERROR: cannot alter create_group_indexes option for continuous aggregates +ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); +ERROR: cannot alter create_group_indexes option for continuous aggregates +ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; +ERROR: cannot alter only SET options of a continuous aggregate +\set ON_ERROR_STOP 1 +\set VERBOSITY terse +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 3 other objects +--test WITH using a hypertable with an integer time dimension +CREATE TABLE conditions ( + timec SMALLINT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test_s'); + set_integer_now_func +---------------------- + +(1 row) + +\set ON_ERROR_STOP 0 +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +ALTER TABLE conditions ALTER timec type int; +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +\set ON_ERROR_STOP 1 +DROP TABLE conditions cascade; +NOTICE: drop cascades to 3 other objects +CREATE TABLE conditions ( + timec BIGINT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test_b'); + set_integer_now_func +---------------------- + +(1 row) + +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by 1 WITH NO DATA; +-- custom time partition functions are not supported with invalidations +CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT + AS $$ SELECT length($1)::BIGINT $$ + LANGUAGE SQL IMMUTABLE; +CREATE TABLE text_time(time TEXT); + SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (10,public,text_time,t) +(1 row) + +\set VERBOSITY default +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW text_view + WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) + AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) + FROM text_time + GROUP BY 1 WITH NO DATA; +ERROR: custom partitioning functions not supported with continuous aggregates +\set ON_ERROR_STOP 1 +-- Check that we get an error when mixing normal materialized views +-- and continuous aggregates. +CREATE MATERIALIZED VIEW normal_mat_view AS +SELECT time_bucket('5', text_part_func(time)), COUNT(time) + FROM text_time +GROUP BY 1 WITH NO DATA; +\set VERBOSITY terse +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; +ERROR: mixing continuous aggregates and other objects not allowed +\set ON_ERROR_STOP 1 +DROP TABLE text_time CASCADE; +NOTICE: drop cascades to materialized view normal_mat_view +CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); +SELECT create_hypertable('measurements', 'time'); + create_hypertable +---------------------------- + (11,public,measurements,t) +(1 row) + +INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); +-- Add a continuous aggregate on the measurements table and a policy +-- to be able to test error cases for the add_job function. 
+CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS +SELECT time_bucket('1 day', time), COUNT(time) + FROM measurements +GROUP BY 1 WITH NO DATA; +SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'measurements_summary' +\gset +-- First test that add_job checks the config. It is currently possible +-- to add non-custom jobs using the add_job function so we need to +-- test that the function actually checks the config parameters. These +-- should all generate errors, for different reasons... +\set ON_ERROR_STOP 0 +-- ... this one because it is missing a field. +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => '{"end_offset": null, "start_offset": null}'); +ERROR: could not find "mat_hypertable_id" in config for job +-- ... this one because it has a bad value for start_offset +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); +ERROR: invalid input syntax for type interval: "1 fortnight" +-- ... this one because it has a bad value for end_offset +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); +ERROR: invalid input syntax for type interval: "chicken" +\set ON_ERROR_STOP 1 +SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id +\gset +\x on +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; +-[ RECORD 1 ]-----+-------------------------------------------------------------------- +id | 1000 +application_name | Refresh Continuous Aggregate Policy [1000] +schedule_interval | @ 1 hour +max_runtime | @ 0 +max_retries | -1 +retry_period | @ 1 hour +proc_schema | _timescaledb_functions +proc_name | policy_refresh_continuous_aggregate +owner | default_perm_user +scheduled | t +fixed_schedule | f +initial_start | +hypertable_id | 12 +config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 12} +check_schema | _timescaledb_functions +check_name | policy_refresh_continuous_aggregate_check +timezone | + +\x off +-- These are all weird values for the parameters for the continuous +-- aggregate jobs and should generate an error. Since the config will +-- be replaced, we will also generate error for missing arguments. 
+\set ON_ERROR_STOP 0 +SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); +ERROR: could not find "mat_hypertable_id" in config for job +SELECT alter_job(:job_id, + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); +ERROR: invalid input syntax for type interval: "1 fortnights" +SELECT alter_job(:job_id, + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); +ERROR: invalid input syntax for type interval: "chicken" +\set ON_ERROR_STOP 1 +DROP TABLE measurements CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 3 other objects +-- test handling of invalid mat_hypertable_id +create table i2980(time timestamptz not null); +select create_hypertable('i2980','time'); + create_hypertable +--------------------- + (13,public,i2980,t) +(1 row) + +create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; +NOTICE: continuous aggregate "i2980_cagg" is already up-to-date +select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset +\set ON_ERROR_STOP 0 +select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); +ERROR: configuration materialization hypertable id 1000 not found +--test creating continuous aggregate with compression enabled -- +CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) +AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; +ERROR: cannot enable compression while creating a continuous aggregate +--this one succeeds +CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; +NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date +--now enable compression with invalid parameters +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, +timescaledb.compress_segmentby = 'bucket'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot use column "bucket" for both ordering and segmenting +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, +timescaledb.compress_orderby = 'bucket'); +--enable compression and test re-enabling compression +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); +NOTICE: defaulting compress_orderby to bucket +insert into i2980 select now(); +call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); +SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_3_chunk +(1 row) + +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); +ERROR: cannot change configuration on already compressed chunks +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot change configuration on already compressed chunks +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot change configuration on 
already compressed chunks +--Errors with compression policy on caggs-- +select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; + job_id +-------- + 1002 +(1 row) + +SELECT add_compression_policy('i2980_cagg', '8 day'::interval); +ERROR: compression not enabled on continuous aggregate "i2980_cagg" +ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); +NOTICE: defaulting compress_orderby to time_bucket +SELECT add_compression_policy('i2980_cagg', '8 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg +SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); +ERROR: function add_continuous_aggregate_policy(unknown, interval, interval) does not exist at character 8 +SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 +SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 +SELECT add_compression_policy('i2980_cagg2', '3'::integer); +ERROR: unsupported compress_after argument type, expected type : interval +SELECT add_compression_policy('i2980_cagg2', 13::integer); +ERROR: unsupported compress_after argument type, expected type : interval +SELECT materialization_hypertable_schema || '.' || materialization_hypertable_name AS "MAT_TABLE_NAME" +FROM timescaledb_information.continuous_aggregates +WHERE view_name = 'i2980_cagg2' +\gset +SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); +ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_15" +--TEST compressing cagg chunks without enabling compression +SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; + count +------- + 1 +(1 row) + +ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); +SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; +ERROR: compression not enabled on "i2980_cagg2" +-- test error handling when trying to create cagg on internal hypertable +CREATE TABLE comp_ht_test(time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('comp_ht_test','time'); + table_name +-------------- + comp_ht_test +(1 row) + +ALTER TABLE comp_ht_test SET (timescaledb.compress); +SELECT + format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" +FROM + _timescaledb_catalog.hypertable ht + INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id + AND uncompress.table_name = 'comp_ht_test') \gset +CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; +ERROR: hypertable is an internal compressed hypertable diff --git a/tsl/test/expected/cagg_errors_deprecated-16.out b/tsl/test/expected/cagg_errors_deprecated-16.out new file mode 100644 index 00000000000..d48635538a4 --- /dev/null +++ b/tsl/test/expected/cagg_errors_deprecated-16.out @@ -0,0 +1,712 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +\set VERBOSITY default +--negative tests for query validation +create table mat_t1( a integer, b integer,c TEXT); +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature integer NULL, + humidity DOUBLE PRECISION NULL, + timemeasure TIMESTAMPTZ, + timeinterval INTERVAL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, timescaledb.myfill = 1) +as +select location , min(temperature) +from conditions +group by time_bucket('1d', timec), location WITH NO DATA; +ERROR: unrecognized parameter "timescaledb.myfill" +--valid PG option +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false, check_option = LOCAL ) +as +select * from conditions , mat_t1 WITH NO DATA; +ERROR: unsupported combination of storage parameters +DETAIL: A continuous aggregate does not support standard storage parameters. +HINT: Use only parameters with the "timescaledb." prefix when creating a continuous aggregate. +--non-hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select a, count(*) from mat_t1 +group by a WITH NO DATA; +ERROR: table "mat_t1" is not a hypertable +-- no group by +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select count(*) from conditions WITH NO DATA; +ERROR: invalid continuous aggregate query +HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. +-- no time_bucket in group by +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select count(*) from conditions group by location WITH NO DATA; +ERROR: continuous aggregate view must include a valid time bucket function +-- with valid query in a CTE +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +with m1 as ( +Select location, count(*) from conditions + group by time_bucket('1week', timec) , location) +select * from m1 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: CTEs, subqueries and set-returning functions are not supported by continuous aggregates. +--with DISTINCT ON +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as + select distinct on ( location ) count(*) from conditions group by location, time_bucket('1week', timec) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: DISTINCT / DISTINCT ON queries are not supported by continuous aggregates. 
+--aggregate with DISTINCT +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), + count(location) , sum(distinct temperature) from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +--aggregate with FILTER +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), + sum(temperature) filter ( where humidity > 20 ) from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +-- aggregate with filter in having clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select time_bucket('1week', timec), max(temperature) +from conditions + group by time_bucket('1week', timec) , location + having sum(temperature) filter ( where humidity > 20 ) > 50 WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +-- time_bucket on non partitioning column of hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timemeasure) , location WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +--time_bucket on expression +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timec+ '10 minutes'::interval) , location WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +--multiple time_bucket functions +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket('1week', timec) , time_bucket('1month', timec), location WITH NO DATA; +ERROR: continuous aggregate view cannot contain multiple time bucket functions +--time_bucket using additional args +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket( INTERVAL '5 minutes', timec, INTERVAL '-2.5 minutes') , location WITH NO DATA; +ERROR: continuous aggregate view must include a valid time bucket function +--time_bucket using non-const for first argument +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select max(temperature) +from conditions + group by time_bucket( timeinterval, timec) , location WITH NO DATA; +ERROR: only immutable expressions allowed in time bucket function +HINT: Use an immutable expression as first argument to the time bucket function. 
+-- ordered set aggr +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select mode() within group( order by humidity) +from conditions + group by time_bucket('1week', timec) WITH NO DATA; +ERROR: aggregates with FILTER / DISTINCT / ORDER BY are not supported +--window function +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select avg(temperature) over( order by humidity) +from conditions + WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: Window functions are not supported by continuous aggregates. +--aggregate without combine function +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select json_agg(location) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- Starting on PG16 this test will pass because array_agg is parallel safe +-- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature), array_agg(location) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +; +-- userdefined aggregate without combine function +CREATE AGGREGATE newavg ( + sfunc = int4_avg_accum, basetype = int4, stype = _int8, + finalfunc = int8_avg, + initcond1 = '{0,0}' +); +DROP MATERIALIZED VIEW IF EXISTS mat_m1; +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), newavg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: aggregates which are not parallelizable are not supported +; +-- using subqueries +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from +( select humidity, temperature, location, timec +from conditions ) q + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: invalid continuous aggregate view +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +select * from +( Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location ) q WITH NO DATA; +ERROR: invalid continuous aggregate query +HINT: Include at least one aggregate function and a GROUP BY clause with time bucket. +--using limit /limit offset +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +limit 10 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. 
+CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +offset 10 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. +--using ORDER BY in view definition +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +ORDER BY 1 WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: ORDER BY is not supported in queries defining continuous aggregates. +HINT: Use ORDER BY clauses in SELECTS from the continuous aggregate view instead. +--using FETCH +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +fetch first 10 rows only WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: LIMIT and LIMIT OFFSET are not supported in queries defining continuous aggregates. +HINT: Use LIMIT and LIMIT OFFSET in SELECTS from the continuous aggregate view instead. +--using locking clauses FOR clause +--all should be disabled. we cannot guarantee locks on the hypertable +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR KEY SHARE WITH NO DATA; +ERROR: FOR KEY SHARE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR SHARE WITH NO DATA; +ERROR: FOR SHARE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR UPDATE WITH NO DATA; +ERROR: FOR UPDATE is not allowed with GROUP BY clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by time_bucket('1week', timec) , location +FOR NO KEY UPDATE WITH NO DATA; +ERROR: FOR NO KEY UPDATE is not allowed with GROUP BY clause +--tablesample clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions tablesample bernoulli(0.2) + group by time_bucket('1week', timec) , location + WITH NO DATA; +ERROR: invalid continuous aggregate view +-- ONLY in from clause +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from ONLY 
conditions + group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: invalid continuous aggregate view +--grouping sets and variants +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions + group by grouping sets(time_bucket('1week', timec) , location ) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates +HINT: Define multiple continuous aggregates with different grouping levels. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), avg(temperature::int4) +from conditions +group by rollup(time_bucket('1week', timec) , location ) WITH NO DATA; +ERROR: invalid continuous aggregate query +DETAIL: GROUP BY GROUPING SETS, ROLLUP and CUBE are not supported by continuous aggregates +HINT: Define multiple continuous aggregates with different grouping levels. +--NO immutable functions -- check all clauses +CREATE FUNCTION test_stablefunc(int) RETURNS int LANGUAGE 'sql' + STABLE AS 'SELECT $1 + 10'; +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), max(timec + INTERVAL '1h') +from conditions +group by time_bucket('1week', timec) , location WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum(humidity), min(location) +from conditions +group by time_bucket('1week', timec) +having max(timec + INTERVAL '1h') > '2010-01-01 09:00:00-08' WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( test_stablefunc(humidity::int) ), min(location) +from conditions +group by time_bucket('1week', timec) WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( temperature ), min(location) +from conditions +group by time_bucket('1week', timec), test_stablefunc(humidity::int) WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +HINT: Make sure all functions in the continuous aggregate definition have IMMUTABLE volatility. Note that functions or expressions may be IMMUTABLE for one data type, but STABLE or VOLATILE for another. 
+-- Should use CREATE MATERIALIZED VIEW to create continuous aggregates +CREATE VIEW continuous_aggs_errors_tbl1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS +SELECT time_bucket('1 week', timec) + FROM conditions +GROUP BY time_bucket('1 week', timec); +ERROR: cannot create continuous aggregate with CREATE VIEW +HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. +-- row security on table +create table rowsec_tab( a bigint, b integer, c integer); +select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + rowsec_tab +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0)::bigint FROM rowsec_tab $$; +SELECT set_integer_now_func('rowsec_tab', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +alter table rowsec_tab ENABLE ROW LEVEL SECURITY; +create policy rowsec_tab_allview ON rowsec_tab FOR SELECT USING(true); +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +Select sum( b), min(c) +from rowsec_tab +group by time_bucket('1', a) WITH NO DATA; +ERROR: cannot create continuous aggregate on hypertable with row security +-- cagg on cagg not allowed +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS +SELECT time_bucket('1 day', timec) AS bucket + FROM conditions +GROUP BY time_bucket('1 day', timec); +NOTICE: continuous aggregate "mat_m1" is already up-to-date +CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('1 week', bucket) AS bucket + FROM mat_m1 +GROUP BY time_bucket('1 week', bucket); +ERROR: old format of continuous aggregate is not supported +HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
+drop table conditions cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view mat_m1 +drop cascades to view _timescaledb_internal._partial_view_4 +drop cascades to view _timescaledb_internal._direct_view_4 +--negative tests for WITH options +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec) WITH NO DATA; +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_with_test' +\gset +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'false'); +ERROR: cannot alter create_group_indexes option for continuous aggregates +ALTER MATERIALIZED VIEW mat_with_test SET(timescaledb.create_group_indexes = 'true'); +ERROR: cannot alter create_group_indexes option for continuous aggregates +ALTER MATERIALIZED VIEW mat_with_test ALTER timec DROP default; +ERROR: cannot alter only SET options of a continuous aggregate +\set ON_ERROR_STOP 1 +\set VERBOSITY terse +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 3 other objects +--test WITH using a hypertable with an integer time dimension +CREATE TABLE conditions ( + timec SMALLINT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test_s() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::smallint FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test_s'); + set_integer_now_func +---------------------- + +(1 row) + +\set ON_ERROR_STOP 0 +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +ERROR: time bucket function must reference a hypertable dimension column +ALTER TABLE conditions ALTER timec type int; +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH 
(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +\set ON_ERROR_STOP 1 +DROP TABLE conditions cascade; +NOTICE: drop cascades to 3 other objects +CREATE TABLE conditions ( + timec BIGINT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test_b() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0)::bigint FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test_b'); + set_integer_now_func +---------------------- + +(1 row) + +create materialized view mat_with_test( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +as +select time_bucket(BIGINT '100', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by 1 WITH NO DATA; +-- custom time partition functions are not supported with invalidations +CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT + AS $$ SELECT length($1)::BIGINT $$ + LANGUAGE SQL IMMUTABLE; +CREATE TABLE text_time(time TEXT); + SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (11,public,text_time,t) +(1 row) + +\set VERBOSITY default +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW text_view + WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) + AS SELECT time_bucket('5', text_part_func(time)), COUNT(time) + FROM text_time + GROUP BY 1 WITH NO DATA; +ERROR: custom partitioning functions not supported with continuous aggregates +\set ON_ERROR_STOP 1 +-- Check that we get an error when mixing normal materialized views +-- and continuous aggregates. +CREATE MATERIALIZED VIEW normal_mat_view AS +SELECT time_bucket('5', text_part_func(time)), COUNT(time) + FROM text_time +GROUP BY 1 WITH NO DATA; +\set VERBOSITY terse +\set ON_ERROR_STOP 0 +DROP MATERIALIZED VIEW normal_mat_view, mat_with_test; +ERROR: mixing continuous aggregates and other objects not allowed +\set ON_ERROR_STOP 1 +DROP TABLE text_time CASCADE; +NOTICE: drop cascades to materialized view normal_mat_view +CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); +SELECT create_hypertable('measurements', 'time'); + create_hypertable +---------------------------- + (12,public,measurements,t) +(1 row) + +INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); +-- Add a continuous aggregate on the measurements table and a policy +-- to be able to test error cases for the add_job function. 
+CREATE MATERIALIZED VIEW measurements_summary WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS +SELECT time_bucket('1 day', time), COUNT(time) + FROM measurements +GROUP BY 1 WITH NO DATA; +SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'measurements_summary' +\gset +-- First test that add_job checks the config. It is currently possible +-- to add non-custom jobs using the add_job function so we need to +-- test that the function actually checks the config parameters. These +-- should all generate errors, for different reasons... +\set ON_ERROR_STOP 0 +-- ... this one because it is missing a field. +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => '{"end_offset": null, "start_offset": null}'); +ERROR: could not find "mat_hypertable_id" in config for job +-- ... this one because it has a bad value for start_offset +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); +ERROR: invalid input syntax for type interval: "1 fortnight" +-- ... this one because it has a bad value for end_offset +SELECT add_job( + '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, + '1 hour'::interval, + check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, + config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); +ERROR: invalid input syntax for type interval: "chicken" +\set ON_ERROR_STOP 1 +SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id +\gset +\x on +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; +-[ RECORD 1 ]-----+-------------------------------------------------------------------- +id | 1000 +application_name | Refresh Continuous Aggregate Policy [1000] +schedule_interval | @ 1 hour +max_runtime | @ 0 +max_retries | -1 +retry_period | @ 1 hour +proc_schema | _timescaledb_functions +proc_name | policy_refresh_continuous_aggregate +owner | default_perm_user +scheduled | t +fixed_schedule | f +initial_start | +hypertable_id | 13 +config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 13} +check_schema | _timescaledb_functions +check_name | policy_refresh_continuous_aggregate_check +timezone | + +\x off +-- These are all weird values for the parameters for the continuous +-- aggregate jobs and should generate an error. Since the config will +-- be replaced, we will also generate error for missing arguments. 
+\set ON_ERROR_STOP 0 +SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); +ERROR: could not find "mat_hypertable_id" in config for job +SELECT alter_job(:job_id, + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); +ERROR: invalid input syntax for type interval: "1 fortnights" +SELECT alter_job(:job_id, + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); +ERROR: invalid input syntax for type interval: "chicken" +\set ON_ERROR_STOP 1 +DROP TABLE measurements CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 3 other objects +-- test handling of invalid mat_hypertable_id +create table i2980(time timestamptz not null); +select create_hypertable('i2980','time'); + create_hypertable +--------------------- + (14,public,i2980,t) +(1 row) + +create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; +NOTICE: continuous aggregate "i2980_cagg" is already up-to-date +select add_continuous_aggregate_policy('i2980_cagg',NULL,NULL,'4h') AS job_id \gset +\set ON_ERROR_STOP 0 +select alter_job(:job_id,config:='{"end_offset": null, "start_offset": null, "mat_hypertable_id": 1000}'); +ERROR: configuration materialization hypertable id 1000 not found +--test creating continuous aggregate with compression enabled -- +CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.compress, timescaledb.finalized = false) +AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; +ERROR: cannot enable compression while creating a continuous aggregate +--this one succeeds +CREATE MATERIALIZED VIEW i2980_cagg2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) +AS SELECT time_bucket('1h',time) as bucket, avg(7) FROM i2980 GROUP BY 1; +NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date +--now enable compression with invalid parameters +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, +timescaledb.compress_segmentby = 'bucket'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot use column "bucket" for both ordering and segmenting +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, +timescaledb.compress_orderby = 'bucket'); +--enable compression and test re-enabling compression +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); +NOTICE: defaulting compress_orderby to bucket +insert into i2980 select now(); +call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); +SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_16_3_chunk +(1 row) + +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); +ERROR: cannot change configuration on already compressed chunks +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot change configuration on already compressed chunks +ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); +NOTICE: defaulting compress_orderby to bucket +ERROR: cannot change configuration on 
already compressed chunks +--Errors with compression policy on caggs-- +select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ; + job_id +-------- + 1002 +(1 row) + +SELECT add_compression_policy('i2980_cagg', '8 day'::interval); +ERROR: compression not enabled on continuous aggregate "i2980_cagg" +ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); +NOTICE: defaulting compress_orderby to time_bucket +SELECT add_compression_policy('i2980_cagg', '8 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg +SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); +ERROR: function add_continuous_aggregate_policy(unknown, interval, interval) does not exist at character 8 +SELECT add_compression_policy('i2980_cagg2', '3 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 +SELECT add_compression_policy('i2980_cagg2', '1 day'::interval); +ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg2 +SELECT add_compression_policy('i2980_cagg2', '3'::integer); +ERROR: unsupported compress_after argument type, expected type : interval +SELECT add_compression_policy('i2980_cagg2', 13::integer); +ERROR: unsupported compress_after argument type, expected type : interval +SELECT materialization_hypertable_schema || '.' || materialization_hypertable_name AS "MAT_TABLE_NAME" +FROM timescaledb_information.continuous_aggregates +WHERE view_name = 'i2980_cagg2' +\gset +SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); +ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_16" +--TEST compressing cagg chunks without enabling compression +SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; + count +------- + 1 +(1 row) + +ALTER MATERIALIZED VIEW i2980_cagg2 SET (timescaledb.compress = 'false'); +SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; +ERROR: compression not enabled on "i2980_cagg2" +-- test error handling when trying to create cagg on internal hypertable +CREATE TABLE comp_ht_test(time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('comp_ht_test','time'); + table_name +-------------- + comp_ht_test +(1 row) + +ALTER TABLE comp_ht_test SET (timescaledb.compress); +SELECT + format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" +FROM + _timescaledb_catalog.hypertable ht + INNER JOIN _timescaledb_catalog.hypertable uncompress ON (ht.id = uncompress.compressed_hypertable_id + AND uncompress.table_name = 'comp_ht_test') \gset +CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; +ERROR: hypertable is an internal compressed hypertable diff --git a/tsl/test/expected/cagg_invalidation_dist_ht-16.out b/tsl/test/expected/cagg_invalidation_dist_ht-16.out new file mode 100644 index 00000000000..10cfcc6976f --- /dev/null +++ b/tsl/test/expected/cagg_invalidation_dist_ht-16.out @@ -0,0 +1,1397 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +--------------------------------+--------------------------------+--------------+------------------+------------------- + db_cagg_invalidation_dist_ht_1 | db_cagg_invalidation_dist_ht_1 | t | t | t + db_cagg_invalidation_dist_ht_2 | db_cagg_invalidation_dist_ht_2 | t | t | t + db_cagg_invalidation_dist_ht_3 | db_cagg_invalidation_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER, :ROLE_DEFAULT_PERM_USER_2; +\set IS_DISTRIBUTED TRUE +\ir include/cagg_invalidation_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- Disable background workers since we are testing manual refresh +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +SET ROLE :ROLE_DEFAULT_PERM_USER; +SET datestyle TO 'ISO, YMD'; +SET timezone TO 'UTC'; +CREATE TABLE conditions (time bigint NOT NULL, device int, temp float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + create_distributed_hypertable +------------------------------- + (1,public,conditions,t) +(1 row) + +\else +SELECT create_hypertable('conditions', 'time', chunk_time_interval => 10); +\endif +CREATE TABLE measurements (time int NOT NULL, device int, temp float); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('measurements', 'time', chunk_time_interval => 10, replication_factor => 2); + create_distributed_hypertable +------------------------------- + (2,public,measurements,t) +(1 row) + +\else +SELECT create_hypertable('measurements', 'time', chunk_time_interval => 10); +\endif +CREATE OR REPLACE FUNCTION bigint_now() +RETURNS bigint LANGUAGE SQL STABLE AS +$$ + SELECT coalesce(max(time), 0) + FROM conditions +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION bigint_now() +RETURNS bigint LANGUAGE SQL STABLE AS +$$ + SELECT coalesce(max(time), 0) + FROM conditions +$$; +$DIST$); +\endif +CREATE OR REPLACE FUNCTION int_now() +RETURNS int LANGUAGE SQL STABLE AS +$$ + SELECT coalesce(max(time), 0) + FROM measurements +$$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION int_now() +RETURNS int LANGUAGE SQL STABLE AS +$$ + SELECT coalesce(max(time), 0) + FROM measurements +$$; +$DIST$); +\endif +SELECT set_integer_now_func('conditions', 'bigint_now'); + set_integer_now_func +---------------------- + +(1 row) + +SELECT set_integer_now_func('measurements', 'int_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO conditions +SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::int, + abs(timestamp_hash(to_timestamp(t)::timestamp))%40 +FROM generate_series(1, 100, 1) t; +CREATE TABLE temp AS +SELECT * FROM conditions; +INSERT INTO measurements +SELECT * FROM temp; +-- Show the most recent data +SELECT * FROM conditions +ORDER BY time DESC, device +LIMIT 10; + time | device | temp +------+--------+------ + 100 | 0 | 8 + 99 | 1 | 5 + 98 | 2 | 26 + 97 | 2 | 10 + 96 | 2 | 34 + 95 | 2 | 30 + 94 | 3 | 31 + 93 | 0 | 4 + 92 | 0 | 32 + 91 | 3 | 15 +(10 rows) + +-- Create two continuous aggregates on the same hypertable to test +-- that invalidations are handled correctly across both of them. 
+CREATE MATERIALIZED VIEW cond_10 +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +AS +SELECT time_bucket(BIGINT '10', time) AS bucket, device, avg(temp) AS avg_temp +FROM conditions +GROUP BY 1,2 WITH NO DATA; +CREATE MATERIALIZED VIEW cond_20 +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +AS +SELECT time_bucket(BIGINT '20', time) AS bucket, device, avg(temp) AS avg_temp +FROM conditions +GROUP BY 1,2 WITH NO DATA; +CREATE MATERIALIZED VIEW measure_10 +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +AS +SELECT time_bucket(10, time) AS bucket, device, avg(temp) AS avg_temp +FROM measurements +GROUP BY 1,2 WITH NO DATA; +-- There should be three continuous aggregates, two on one hypertable +-- and one on the other: +SELECT mat_hypertable_id, raw_hypertable_id, user_view_name +FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | raw_hypertable_id | user_view_name +-------------------+-------------------+---------------- + 3 | 1 | cond_10 + 4 | 1 | cond_20 + 5 | 2 | measure_10 +(3 rows) + +-- The continuous aggregates should be empty +SELECT * FROM cond_10 +ORDER BY 1 DESC, 2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +SELECT * FROM cond_20 +ORDER BY 1 DESC, 2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +SELECT * FROM measure_10 +ORDER BY 1 DESC, 2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +\if :IS_DISTRIBUTED +CREATE OR REPLACE FUNCTION get_hyper_invals() RETURNS TABLE( + "hyper_id" INT, + "start" BIGINT, + "end" BIGINT +) +LANGUAGE SQL VOLATILE AS +$$ +SELECT DISTINCT table_record[1]::TEXT::INT, table_record[2]::TEXT::BIGINT, table_record[3]::TEXT::BIGINT FROM test.remote_exec_get_result_strings(NULL, $DIST$ + SELECT hypertable_id, + lowest_modified_value, + greatest_modified_value + FROM _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log +$DIST$) +ORDER BY 1,2,3 +$$; +CREATE OR REPLACE FUNCTION get_cagg_invals() RETURNS TABLE( + "cagg_id" INT, + "start" BIGINT, + "end" BIGINT +) +LANGUAGE SQL VOLATILE AS +$$ +SELECT DISTINCT table_record[1]::TEXT::INT, table_record[2]::TEXT::BIGINT, table_record[3]::TEXT::BIGINT FROM test.remote_exec_get_result_strings(NULL, $DIST$ + SELECT materialization_id AS cagg_id, + lowest_modified_value AS start, + greatest_modified_value AS end + FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log +$DIST$) +ORDER BY 1,2,3 +$$; +\else +CREATE OR REPLACE FUNCTION get_hyper_invals() RETURNS TABLE ( + "hyper_id" INT, + "start" BIGINT, + "end" BIGINT +) +LANGUAGE SQL VOLATILE AS +$$ +SELECT hypertable_id, + lowest_modified_value, + greatest_modified_value + FROM _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log + ORDER BY 1,2,3 +$$; +CREATE OR REPLACE FUNCTION get_cagg_invals() RETURNS TABLE ( + "cagg_id" INT, + "start" BIGINT, + "end" BIGINT +) +LANGUAGE SQL VOLATILE AS +$$ +SELECT materialization_id, + lowest_modified_value, + greatest_modified_value + FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log + ORDER BY 1,2,3 +$$; +\endif +CREATE VIEW hyper_invals AS SELECT * FROM get_hyper_invals(); +CREATE VIEW cagg_invals AS SELECT * FROM get_cagg_invals(); +-- Must refresh to move the invalidation threshold, or no +-- invalidations will be generated. 
Initially, threshold is the +-- MIN of the time dimension data type: +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +ORDER BY 1,2; + hypertable_id | watermark +---------------+---------------------- + 1 | -9223372036854775808 + 2 | -2147483648 +(2 rows) + +-- There should be only "infinite" invalidations in the cagg +-- invalidation log: +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | 9223372036854775807 + 4 | -9223372036854775808 | 9223372036854775807 + 5 | -9223372036854775808 | 9223372036854775807 +(3 rows) + +-- Now refresh up to 50 without the first bucket, and the threshold should be updated accordingly: +CALL refresh_continuous_aggregate('cond_10', 1, 50); +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +ORDER BY 1,2; + hypertable_id | watermark +---------------+------------- + 1 | 50 + 2 | -2147483648 +(2 rows) + +-- Invalidations should be cleared inside the refresh window: +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | 9 + 3 | 50 | 9223372036854775807 + 4 | -9223372036854775808 | 9223372036854775807 + 5 | -9223372036854775808 | 9223372036854775807 +(4 rows) + +-- Refresh up to 50 from the beginning +CALL refresh_continuous_aggregate('cond_10', 0, 50); +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 50 | 9223372036854775807 + 4 | -9223372036854775808 | 9223372036854775807 + 5 | -9223372036854775808 | 9223372036854775807 +(4 rows) + +-- Refreshing below the threshold does not move it: +CALL refresh_continuous_aggregate('cond_10', 20, 49); +psql:include/cagg_invalidation_common.sql:207: NOTICE: continuous aggregate "cond_10" is already up-to-date +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +ORDER BY 1,2; + hypertable_id | watermark +---------------+------------- + 1 | 50 + 2 | -2147483648 +(2 rows) + +-- Nothing changes with invalidations either since the region was +-- already refreshed and no new invalidations have been generated: +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 50 | 9223372036854775807 + 4 | -9223372036854775808 | 9223372036854775807 + 5 | -9223372036854775808 | 9223372036854775807 +(4 rows) + +-- Refreshing measure_10 moves the threshold only for the other hypertable: +CALL refresh_continuous_aggregate('measure_10', 0, 30); +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +ORDER BY 1,2; + hypertable_id | watermark +---------------+----------- + 1 | 50 + 2 | 30 +(2 rows) + +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 50 | 9223372036854775807 + 4 | -9223372036854775808 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(5 rows) + +-- Refresh on the second continuous aggregate, cond_20, on the first +-- hypertable moves the same threshold as when refreshing cond_10: +CALL refresh_continuous_aggregate('cond_20', 60, 100); +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +ORDER BY 1,2; + hypertable_id | watermark +---------------+----------- + 1 | 100 + 2 | 30 +(2 rows) + +SELECT * FROM cagg_invals; + cagg_id | start | end 
+---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 50 | 9223372036854775807 + 4 | -9223372036854775808 | 59 + 4 | 100 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(6 rows) + +-- There should be no hypertable invalidations initially: +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- +(0 rows) + +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 50 | 9223372036854775807 + 4 | -9223372036854775808 | 59 + 4 | 100 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(6 rows) + +-- Create invalidations across different ranges. Some of these should +-- be deleted and others cut in different ways when a refresh is +-- run. Note that the refresh window is inclusive in the start of the +-- window but exclusive at the end. +-- Entries that should be left unmodified: +INSERT INTO conditions VALUES (10, 4, 23.7); +INSERT INTO conditions VALUES (10, 5, 23.8), (19, 3, 23.6); +INSERT INTO conditions VALUES (60, 3, 23.7), (70, 4, 23.7); +-- Should see some invaliations in the hypertable invalidation log: +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 1 | 10 | 10 + 1 | 10 | 19 + 1 | 60 | 60 + 1 | 60 | 70 + 1 | 70 | 70 +(5 rows) + +-- Generate some invalidations for the other hypertable +INSERT INTO measurements VALUES (20, 4, 23.7); +INSERT INTO measurements VALUES (30, 5, 23.8), (80, 3, 23.6); +-- Should now see invalidations for both hypertables +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 1 | 10 | 10 + 1 | 10 | 19 + 1 | 60 | 60 + 1 | 60 | 70 + 1 | 70 | 70 + 2 | 20 | 20 + 2 | 30 | 80 +(7 rows) + +-- First refresh a window where we don't have any invalidations. This +-- allows us to see only the copying of the invalidations to the per +-- cagg log without additional processing. +CALL refresh_continuous_aggregate('cond_10', 20, 60); +-- Invalidation threshold remains at 100: +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +ORDER BY 1,2; + hypertable_id | watermark +---------------+----------- + 1 | 100 + 2 | 30 +(2 rows) + +-- Invalidations should be moved from the hypertable invalidation log +-- to the continuous aggregate log, but only for the hypertable that +-- the refreshed aggregate belongs to: +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 2 | 20 | 20 + 2 | 30 | 80 +(2 rows) + +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 10 | 19 + 3 | 60 | 9223372036854775807 + 4 | -9223372036854775808 | 59 + 4 | 0 | 19 + 4 | 60 | 79 + 4 | 100 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(9 rows) + +-- Now add more invalidations to test a refresh that overlaps with them. 
+-- Entries that should be deleted: +INSERT INTO conditions VALUES (30, 1, 23.4), (59, 1, 23.4); +INSERT INTO conditions VALUES (20, 1, 23.4), (30, 1, 23.4); +-- Entries that should be cut to the right, leaving an invalidation to +-- the left of the refresh window: +INSERT INTO conditions VALUES (1, 4, 23.7), (25, 1, 23.4); +INSERT INTO conditions VALUES (19, 4, 23.7), (59, 1, 23.4); +-- Entries that should be cut to the left and right, leaving two +-- invalidation entries on each side of the refresh window: +INSERT INTO conditions VALUES (2, 2, 23.5), (60, 1, 23.4); +INSERT INTO conditions VALUES (3, 2, 23.5), (80, 1, 23.4); +-- Entries that should be cut to the left, leaving an invalidation to +-- the right of the refresh window: +INSERT INTO conditions VALUES (60, 3, 23.6), (90, 3, 23.6); +INSERT INTO conditions VALUES (20, 5, 23.8), (100, 3, 23.6); +-- New invalidations in the hypertable invalidation log: +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 1 | 1 | 1 + 1 | 1 | 25 + 1 | 2 | 60 + 1 | 3 | 3 + 1 | 3 | 80 + 1 | 19 | 19 + 1 | 19 | 59 + 1 | 20 | 20 + 1 | 20 | 30 + 1 | 20 | 100 + 1 | 25 | 25 + 1 | 30 | 30 + 1 | 30 | 59 + 1 | 59 | 59 + 1 | 60 | 90 + 1 | 80 | 80 + 1 | 100 | 100 + 2 | 20 | 20 + 2 | 30 | 80 +(19 rows) + +-- But nothing has yet changed in the cagg invalidation log: +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 10 | 19 + 3 | 60 | 9223372036854775807 + 4 | -9223372036854775808 | 59 + 4 | 0 | 19 + 4 | 60 | 79 + 4 | 100 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(9 rows) + +-- Refresh to process invalidations for daily temperature: +CALL refresh_continuous_aggregate('cond_10', 20, 60); +-- Invalidations should be moved from the hypertable invalidation log +-- to the continuous aggregate log. 
+SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 2 | 20 | 20 + 2 | 30 | 80 +(2 rows) + +-- Only the cond_10 cagg should have its entries cut: +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | -9223372036854775808 | 19 + 3 | 10 | 19 + 3 | 60 | 9223372036854775807 + 4 | -9223372036854775808 | 59 + 4 | 0 | 19 + 4 | 0 | 99 + 4 | 0 | 119 + 4 | 60 | 79 + 4 | 100 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(12 rows) + +-- Refresh also cond_20: +CALL refresh_continuous_aggregate('cond_20', 20, 60); +-- The cond_20 cagg should also have its entries cut: +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | -9223372036854775808 | 19 + 3 | 10 | 19 + 3 | 60 | 9223372036854775807 + 4 | -9223372036854775808 | 19 + 4 | 60 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(8 rows) + +-- Refresh cond_10 to completely remove an invalidation: +CALL refresh_continuous_aggregate('cond_10', 0, 20); +-- The 1-19 invalidation should be deleted: +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 60 | 9223372036854775807 + 4 | -9223372036854775808 | 19 + 4 | 60 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(6 rows) + +-- Clear everything between 0 and 100 to make way for new +-- invalidations +CALL refresh_continuous_aggregate('cond_10', 0, 100); +-- Test refreshing with non-overlapping invalidations +INSERT INTO conditions VALUES (20, 1, 23.4), (25, 1, 23.4); +INSERT INTO conditions VALUES (30, 1, 23.4), (46, 1, 23.4); +CALL refresh_continuous_aggregate('cond_10', 1, 40); +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 40 | 49 + 3 | 100 | 9223372036854775807 + 4 | -9223372036854775808 | 19 + 4 | 20 | 39 + 4 | 20 | 59 + 4 | 60 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(9 rows) + +-- Refresh whithout cutting (in area where there are no +-- invalidations). 
Merging of overlapping entries should still happen: +INSERT INTO conditions VALUES (15, 1, 23.4), (42, 1, 23.4); +CALL refresh_continuous_aggregate('cond_10', 90, 100); +psql:include/cagg_invalidation_common.sql:327: NOTICE: continuous aggregate "cond_10" is already up-to-date +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -1 + 3 | 10 | 49 + 3 | 100 | 9223372036854775807 + 4 | -9223372036854775808 | 19 + 4 | 0 | 59 + 4 | 20 | 39 + 4 | 20 | 59 + 4 | 60 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(10 rows) + +-- Test max refresh window +CALL refresh_continuous_aggregate('cond_10', NULL, NULL); +SELECT * FROM cagg_invals; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | 110 | 9223372036854775807 + 4 | -9223372036854775808 | 19 + 4 | 0 | 59 + 4 | 20 | 39 + 4 | 20 | 59 + 4 | 60 | 9223372036854775807 + 5 | -9223372036854775808 | -1 + 5 | 30 | 9223372036854775807 +(8 rows) + +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 2 | 20 | 20 + 2 | 30 | 80 +(2 rows) + +-- Pick the first chunk of conditions to TRUNCATE +SELECT show_chunks AS chunk_to_truncate +FROM show_chunks('conditions') +ORDER BY 1 +LIMIT 1 \gset +-- Show the data before truncating one of the chunks +SELECT * FROM :chunk_to_truncate +ORDER BY 1; + time | device | temp +------+--------+------ + 1 | 4 | 23.7 + 1 | 0 | 16 + 2 | 2 | 23.5 + 2 | 1 | 25 + 3 | 2 | 23.5 + 3 | 0 | 20 + 4 | 2 | 10 + 5 | 2 | 26 + 6 | 1 | 13 + 7 | 3 | 35 + 8 | 1 | 37 + 9 | 3 | 7 +(12 rows) + +-- Truncate one chunk +\if :IS_DISTRIBUTED +-- There is no TRUNCATE implementation for FOREIGN tables yet +\set ON_ERROR_STOP 0 +\endif +TRUNCATE TABLE :chunk_to_truncate; +psql:include/cagg_invalidation_common.sql:352: ERROR: cannot truncate foreign table "_dist_hyper_1_1_chunk" +\if :IS_DISTRIBUTED +\set ON_ERROR_STOP 1 +\endif +-- Should see new invalidation entries for conditions for the non-distributed case +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 2 | 20 | 20 + 2 | 30 | 80 +(2 rows) + +-- TRUNCATE the hypertable to invalidate all its continuous aggregates +TRUNCATE conditions; +-- Now empty +SELECT * FROM conditions; + time | device | temp +------+--------+------ +(0 rows) + +-- Should see an infinite invalidation entry for conditions +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+----------------------+--------------------- + 1 | -9223372036854775808 | 9223372036854775807 + 2 | 20 | 20 + 2 | 30 | 80 +(3 rows) + +-- Aggregates still hold data +SELECT * FROM cond_10 +ORDER BY 1,2 +LIMIT 5; + bucket | device | avg_temp +--------+--------+---------- + 0 | 0 | 18 + 0 | 1 | 25 + 0 | 2 | 20.75 + 0 | 3 | 21 + 0 | 4 | 23.7 +(5 rows) + +SELECT * FROM cond_20 +ORDER BY 1,2 +LIMIT 5; + bucket | device | avg_temp +--------+--------+------------------ + 20 | 0 | 18.2857142857143 + 20 | 1 | 23.5142857142857 + 20 | 2 | 26 + 20 | 3 | 23 + 20 | 5 | 23.8 +(5 rows) + +CALL refresh_continuous_aggregate('cond_10', NULL, NULL); +CALL refresh_continuous_aggregate('cond_20', NULL, NULL); +-- Both should now be empty after refresh +SELECT * FROM cond_10 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +SELECT * FROM cond_20 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +-- Insert new data again and refresh +INSERT INTO conditions VALUES + (1, 1, 23.4), (4, 3, 
14.3), (5, 1, 13.6), + (6, 2, 17.9), (12, 1, 18.3), (19, 3, 28.2), + (10, 3, 22.3), (11, 2, 34.9), (15, 2, 45.6), + (21, 1, 15.3), (22, 2, 12.3), (29, 3, 16.3); +CALL refresh_continuous_aggregate('cond_10', NULL, NULL); +CALL refresh_continuous_aggregate('cond_20', NULL, NULL); +-- Should now hold data again +SELECT * FROM cond_10 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 18.5 + 0 | 2 | 17.9 + 0 | 3 | 14.3 + 10 | 1 | 18.3 + 10 | 2 | 40.25 + 10 | 3 | 25.25 + 20 | 1 | 15.3 + 20 | 2 | 12.3 + 20 | 3 | 16.3 +(9 rows) + +SELECT * FROM cond_20 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+------------------ + 0 | 1 | 18.4333333333333 + 0 | 2 | 32.8 + 0 | 3 | 21.6 + 20 | 1 | 15.3 + 20 | 2 | 12.3 + 20 | 3 | 16.3 +(6 rows) + +-- Truncate one of the aggregates, but first test that we block +-- TRUNCATE ONLY +\set ON_ERROR_STOP 0 +TRUNCATE ONLY cond_20; +psql:include/cagg_invalidation_common.sql:408: ERROR: cannot truncate only a continuous aggregate +\set ON_ERROR_STOP 1 +TRUNCATE cond_20; +-- Should now be empty +SELECT * FROM cond_20 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +-- Other aggregate is not affected +SELECT * FROM cond_10 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 18.5 + 0 | 2 | 17.9 + 0 | 3 | 14.3 + 10 | 1 | 18.3 + 10 | 2 | 40.25 + 10 | 3 | 25.25 + 20 | 1 | 15.3 + 20 | 2 | 12.3 + 20 | 3 | 16.3 +(9 rows) + +-- Refresh again to bring data back +CALL refresh_continuous_aggregate('cond_20', NULL, NULL); +-- The aggregate should be populated again +SELECT * FROM cond_20 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+------------------ + 0 | 1 | 18.4333333333333 + 0 | 2 | 32.8 + 0 | 3 | 21.6 + 20 | 1 | 15.3 + 20 | 2 | 12.3 + 20 | 3 | 16.3 +(6 rows) + +------------------------------------------------------- +-- Test corner cases against a minimal bucket aggregate +------------------------------------------------------- +-- First, clear the table and aggregate +TRUNCATE conditions; +SELECT * FROM conditions; + time | device | temp +------+--------+------ +(0 rows) + +CALL refresh_continuous_aggregate('cond_10', NULL, NULL); +SELECT * FROM cond_10 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +CREATE MATERIALIZED VIEW cond_1 +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +AS +SELECT time_bucket(BIGINT '1', time) AS bucket, device, avg(temp) AS avg_temp +FROM conditions +GROUP BY 1,2 WITH NO DATA; +SELECT mat_hypertable_id AS cond_1_id +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'cond_1' \gset +-- Test manual invalidation error +\if :IS_DISTRIBUTED +\else +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.invalidation_cagg_log_add_entry(:cond_1_id, 1, 0); +\set ON_ERROR_STOP 1 +\endif +-- Test invalidations with bucket size 1 +INSERT INTO conditions VALUES (0, 1, 1.0); +SELECT * FROM hyper_invals; + hyper_id | start | end +----------+-------+----- + 1 | 0 | 0 + 2 | 20 | 20 + 2 | 30 | 80 +(3 rows) + +-- Refreshing around the bucket should not update the aggregate +CALL refresh_continuous_aggregate('cond_1', -1, 0); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +CALL refresh_continuous_aggregate('cond_1', 1, 2); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- +(0 rows) + +-- Refresh only the invalidated bucket +CALL 
refresh_continuous_aggregate('cond_1', 0, 1); +SELECT * FROM cagg_invals +WHERE cagg_id = :cond_1_id; + cagg_id | start | end +---------+----------------------+--------------------- + 6 | -9223372036854775808 | -2 + 6 | 2 | 9223372036854775807 +(2 rows) + +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 +(1 row) + +-- Refresh 1 extra bucket on the left +INSERT INTO conditions VALUES (0, 1, 2.0); +CALL refresh_continuous_aggregate('cond_1', -1, 1); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1.5 +(1 row) + +-- Refresh 1 extra bucket on the right +INSERT INTO conditions VALUES (0, 1, 3.0); +CALL refresh_continuous_aggregate('cond_1', 0, 2); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 2 +(1 row) + +-- Refresh 1 extra bucket on each side +INSERT INTO conditions VALUES (0, 1, 4.0); +CALL refresh_continuous_aggregate('cond_1', -1, 2); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 2.5 +(1 row) + +-- Clear to reset aggregate +TRUNCATE conditions; +CALL refresh_continuous_aggregate('cond_1', NULL, NULL); +-- Test invalidation of size 2 +INSERT INTO conditions VALUES (0, 1, 1.0), (1, 1, 2.0); +-- Refresh one bucket at a time +CALL refresh_continuous_aggregate('cond_1', 0, 1); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 +(1 row) + +CALL refresh_continuous_aggregate('cond_1', 1, 2); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 + 1 | 1 | 2 +(2 rows) + +-- Repeat the same thing but refresh the whole invalidation at once +TRUNCATE conditions; +CALL refresh_continuous_aggregate('cond_1', NULL, NULL); +INSERT INTO conditions VALUES (0, 1, 1.0), (1, 1, 2.0); +CALL refresh_continuous_aggregate('cond_1', 0, 2); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 + 1 | 1 | 2 +(2 rows) + +-- Test invalidation of size 3 +TRUNCATE conditions; +CALL refresh_continuous_aggregate('cond_1', NULL, NULL); +INSERT INTO conditions VALUES (0, 1, 1.0), (1, 1, 2.0), (2, 1, 3.0); +-- Invalidation extends beyond the refresh window on both ends +CALL refresh_continuous_aggregate('cond_1', 1, 2); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 1 | 1 | 2 +(1 row) + +-- Should leave one invalidation on each side of the refresh window +SELECT * FROM cagg_invals +WHERE cagg_id = :cond_1_id; + cagg_id | start | end +---------+-------+--------------------- + 6 | 0 | 0 + 6 | 2 | 2 + 6 | 110 | 9223372036854775807 +(3 rows) + +-- Refresh the two remaining invalidations +CALL refresh_continuous_aggregate('cond_1', 0, 1); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 + 1 | 1 | 2 +(2 rows) + +CALL refresh_continuous_aggregate('cond_1', 2, 3); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 + 1 | 1 | 2 + 2 | 1 | 3 +(3 rows) + +-- Clear and repeat but instead refresh the whole range in one go. The +-- result should be the same as the three partial refreshes. Use +-- DELETE instead of TRUNCATE to clear this time. 
+DELETE FROM conditions; +CALL refresh_continuous_aggregate('cond_1', NULL, NULL); +INSERT INTO conditions VALUES (0, 1, 1.0), (1, 1, 2.0), (2, 1, 3.0); +CALL refresh_continuous_aggregate('cond_1', 0, 3); +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 + 1 | 1 | 2 + 2 | 1 | 3 +(3 rows) + +---------------------------------------------- +-- Test that invalidation threshold is capped +---------------------------------------------- +CREATE table threshold_test (time int, value int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('threshold_test', 'time', chunk_time_interval => 4, replication_factor => 2); +psql:include/cagg_invalidation_common.sql:565: NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (7,public,threshold_test,t) +(1 row) + +\else +SELECT create_hypertable('threshold_test', 'time', chunk_time_interval => 4); +\endif +SELECT set_integer_now_func('threshold_test', 'int_now'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW thresh_2 +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +AS +SELECT time_bucket(2, time) AS bucket, max(value) AS max +FROM threshold_test +GROUP BY 1 WITH NO DATA; +SELECT raw_hypertable_id AS thresh_hyper_id, mat_hypertable_id AS thresh_cagg_id +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'thresh_2' \gset +-- There's no invalidation threshold initially +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :thresh_hyper_id +ORDER BY 1,2; + hypertable_id | watermark +---------------+------------- + 7 | -2147483648 +(1 row) + +-- Test manual invalidation error +\if :IS_DISTRIBUTED +\else +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.invalidation_hyper_log_add_entry(:thresh_hyper_id, 1, 0); +\set ON_ERROR_STOP 1 +\endif +-- Test that threshold is initilized to min value when there's no data +-- and we specify an infinite end. Note that the min value may differ +-- depending on time type. +CALL refresh_continuous_aggregate('thresh_2', 0, NULL); +psql:include/cagg_invalidation_common.sql:599: NOTICE: continuous aggregate "thresh_2" is already up-to-date +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :thresh_hyper_id +ORDER BY 1,2; + hypertable_id | watermark +---------------+------------- + 7 | -2147483648 +(1 row) + +INSERT INTO threshold_test +SELECT v, v FROM generate_series(1, 10) v; +CALL refresh_continuous_aggregate('thresh_2', 0, 5); +-- Threshold should move to end of the last refreshed bucket, which is +-- the last bucket fully included in the window, i.e., the window +-- shrinks to end of previous bucket. 
+SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :thresh_hyper_id +ORDER BY 1,2; + hypertable_id | watermark +---------------+----------- + 7 | 4 +(1 row) + +-- Refresh where both the start and end of the window is above the +-- max data value +CALL refresh_continuous_aggregate('thresh_2', 14, NULL); +psql:include/cagg_invalidation_common.sql:619: NOTICE: continuous aggregate "thresh_2" is already up-to-date +SELECT watermark AS thresh_hyper_id_watermark +FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :thresh_hyper_id \gset +-- Refresh where we start from the current watermark to infinity +CALL refresh_continuous_aggregate('thresh_2', :thresh_hyper_id_watermark, NULL); +psql:include/cagg_invalidation_common.sql:626: NOTICE: continuous aggregate "thresh_2" is already up-to-date +-- Now refresh with max end of the window to test that the +-- invalidation threshold is capped at the last bucket of data +CALL refresh_continuous_aggregate('thresh_2', 0, NULL); +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :thresh_hyper_id +ORDER BY 1,2; + hypertable_id | watermark +---------------+----------- + 7 | 12 +(1 row) + +-- Should not have processed invalidations beyond the invalidation +-- threshold. +SELECT * FROM cagg_invals +WHERE cagg_id = :thresh_cagg_id; + cagg_id | start | end +---------+----------------------+--------------------- + 8 | -9223372036854775808 | -1 + 8 | 12 | 9223372036854775807 +(2 rows) + +-- Check that things are properly materialized +SELECT * FROM thresh_2 +ORDER BY 1; + bucket | max +--------+----- + 0 | 1 + 2 | 3 + 4 | 5 + 6 | 7 + 8 | 9 + 10 | 10 +(6 rows) + +-- Delete the last data +SELECT show_chunks AS chunk_to_drop +FROM show_chunks('threshold_test') +ORDER BY 1 DESC +LIMIT 1 \gset +DELETE FROM threshold_test +WHERE time > 6; +-- The last data in the hypertable is gone +SELECT time_bucket(2, time) AS bucket, max(value) AS max +FROM threshold_test +GROUP BY 1 +ORDER BY 1; + bucket | max +--------+----- + 0 | 1 + 2 | 3 + 4 | 5 + 6 | 6 +(4 rows) + +-- The aggregate still holds data +SELECT * FROM thresh_2 +ORDER BY 1; + bucket | max +--------+----- + 0 | 1 + 2 | 3 + 4 | 5 + 6 | 7 + 8 | 9 + 10 | 10 +(6 rows) + +-- Refresh the aggregate to bring it up-to-date +CALL refresh_continuous_aggregate('thresh_2', 0, NULL); +-- Data also gone from the aggregate +SELECT * FROM thresh_2 +ORDER BY 1; + bucket | max +--------+----- + 0 | 1 + 2 | 3 + 4 | 5 + 6 | 6 +(4 rows) + +-- The invalidation threshold remains the same +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :thresh_hyper_id +ORDER BY 1,2; + hypertable_id | watermark +---------------+----------- + 7 | 12 +(1 row) + +-- Insert new data beyond the invalidation threshold to move it +-- forward +INSERT INTO threshold_test +SELECT v, v FROM generate_series(7, 15) v; +CALL refresh_continuous_aggregate('thresh_2', 0, NULL); +-- Aggregate now updated to reflect newly aggregated data +SELECT * FROM thresh_2 +ORDER BY 1; + bucket | max +--------+----- + 0 | 1 + 2 | 3 + 4 | 5 + 6 | 7 + 8 | 9 + 10 | 11 + 12 | 13 + 14 | 15 +(8 rows) + +-- The invalidation threshold should have moved forward to the end of +-- the new data +SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :thresh_hyper_id +ORDER BY 1,2; + hypertable_id | watermark +---------------+----------- + 7 | 16 +(1 row) + +-- The aggregate remains 
invalid beyond the invalidation threshold +SELECT * FROM cagg_invals +WHERE cagg_id = :thresh_cagg_id; + cagg_id | start | end +---------+----------------------+--------------------- + 8 | -9223372036854775808 | -1 + 8 | 16 | 9223372036854775807 +(2 rows) + +---------------------------------------------------------------------- +-- Test that dropping a chunk invalidates the dropped region. First +-- create another chunk so that we have two chunks. One of the chunks +-- will be dropped. +--------------------------------------------------------------------- +INSERT INTO conditions VALUES (10, 1, 10.0); +-- Chunks currently associated with the hypertable +SELECT show_chunks AS chunk_to_drop +FROM show_chunks('conditions'); + chunk_to_drop +---------------------------------------------- + _timescaledb_internal._dist_hyper_1_34_chunk + _timescaledb_internal._dist_hyper_1_40_chunk +(2 rows) + +-- Pick the first one to drop +SELECT show_chunks AS chunk_to_drop +FROM show_chunks('conditions') +ORDER BY 1 +LIMIT 1 \gset +-- Show the data before dropping one of the chunks +SELECT * FROM conditions +ORDER BY 1,2; + time | device | temp +------+--------+------ + 0 | 1 | 1 + 1 | 1 | 2 + 2 | 1 | 3 + 10 | 1 | 10 +(4 rows) + +-- Drop one chunk +\if :IS_DISTRIBUTED +CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); +DROP FOREIGN TABLE :chunk_to_drop; +\else +DROP TABLE :chunk_to_drop; +\endif +-- The chunk's data no longer exists in the hypertable +SELECT * FROM conditions +ORDER BY 1,2; + time | device | temp +------+--------+------ + 10 | 1 | 10 +(1 row) + +-- Aggregate still remains in continuous aggregate, however +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 0 | 1 | 1 + 1 | 1 | 2 + 2 | 1 | 3 +(3 rows) + +-- Refresh the continuous aggregate to make the dropped data be +-- reflected in the aggregate +CALL refresh_continuous_aggregate('cond_1', NULL, NULL); +-- Aggregate now up-to-date with the source hypertable +SELECT * FROM cond_1 +ORDER BY 1,2; + bucket | device | avg_temp +--------+--------+---------- + 10 | 1 | 10 +(1 row) + +-- Test that adjacent invalidations are merged +INSERT INTO conditions VALUES(1, 1, 1.0), (2, 1, 2.0); +INSERT INTO conditions VALUES(3, 1, 1.0); +INSERT INTO conditions VALUES(4, 1, 1.0); +INSERT INTO conditions VALUES(6, 1, 1.0); +CALL refresh_continuous_aggregate('cond_1', 10, NULL); +psql:include/cagg_invalidation_common.sql:748: NOTICE: continuous aggregate "cond_1" is already up-to-date +SELECT * FROM cagg_invals +WHERE cagg_id = :cond_1_id; + cagg_id | start | end +---------+-------+--------------------- + 6 | 1 | 4 + 6 | 6 | 6 + 6 | 110 | 9223372036854775807 +(3 rows) + +--------------------------------------------------------------------- +-- Test that single timestamp invalidations are expanded to buckets, +-- and adjacent buckets merged. This merging cannot cross Data-Node +-- chunk boundaries for the distributed hypertable case. 
+--------------------------------------------------------------------- +-- First clear invalidations in a range: +CALL refresh_continuous_aggregate('cond_10', -20, 60); +-- The following three should be merged to one range 0-29 +INSERT INTO conditions VALUES (5, 1, 1.0); +INSERT INTO conditions VALUES (15, 1, 1.0); +INSERT INTO conditions VALUES (25, 1, 1.0); +-- The last one should not merge with the others +INSERT INTO conditions VALUES (40, 1, 1.0); +-- Refresh to process invalidations, but outside the range of +-- invalidations we inserted so that we don't clear them. +CALL refresh_continuous_aggregate('cond_10', 50, 60); +psql:include/cagg_invalidation_common.sql:769: NOTICE: continuous aggregate "cond_10" is already up-to-date +SELECT mat_hypertable_id AS cond_10_id +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'cond_10' \gset +SELECT * FROM cagg_invals +WHERE cagg_id = :cond_10_id; + cagg_id | start | end +---------+----------------------+--------------------- + 3 | -9223372036854775808 | -21 + 3 | 0 | 9 + 3 | 0 | 19 + 3 | 10 | 29 + 3 | 20 | 29 + 3 | 40 | 49 + 3 | 60 | 9223372036854775807 +(7 rows) + +-- should trigger two individual refreshes +CALL refresh_continuous_aggregate('cond_10', 0, 200); +-- Allow at most 5 individual invalidations per refreshe +SET timescaledb.materializations_per_refresh_window=5; +-- Insert into every second bucket +INSERT INTO conditions VALUES (20, 1, 1.0); +INSERT INTO conditions VALUES (40, 1, 1.0); +INSERT INTO conditions VALUES (60, 1, 1.0); +INSERT INTO conditions VALUES (80, 1, 1.0); +INSERT INTO conditions VALUES (100, 1, 1.0); +INSERT INTO conditions VALUES (120, 1, 1.0); +INSERT INTO conditions VALUES (140, 1, 1.0); +CALL refresh_continuous_aggregate('cond_10', 0, 200); +\set VERBOSITY default +-- Test acceptable values for materializations per refresh +SET timescaledb.materializations_per_refresh_window=' 5 '; +INSERT INTO conditions VALUES (140, 1, 1.0); +CALL refresh_continuous_aggregate('cond_10', 0, 200); +-- Large value will be treated as LONG_MAX +SET timescaledb.materializations_per_refresh_window=342239897234023842394249234766923492347; +INSERT INTO conditions VALUES (140, 1, 1.0); +CALL refresh_continuous_aggregate('cond_10', 0, 200); +-- Test bad values for materializations per refresh +SET timescaledb.materializations_per_refresh_window='foo'; +INSERT INTO conditions VALUES (140, 1, 1.0); +CALL refresh_continuous_aggregate('cond_10', 0, 200); +psql:include/cagg_invalidation_common.sql:808: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window" +DETAIL: Expected an integer but current value is "foo". +SET timescaledb.materializations_per_refresh_window='2bar'; +INSERT INTO conditions VALUES (140, 1, 1.0); +CALL refresh_continuous_aggregate('cond_10', 0, 200); +psql:include/cagg_invalidation_common.sql:811: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window" +DETAIL: Expected an integer but current value is "2bar". +SET timescaledb.materializations_per_refresh_window='-'; +INSERT INTO conditions VALUES (140, 1, 1.0); +CALL refresh_continuous_aggregate('cond_10', 0, 200); +psql:include/cagg_invalidation_common.sql:815: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window" +DETAIL: Expected an integer but current value is "-". 
+\set VERBOSITY terse +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_query-13.out b/tsl/test/expected/cagg_query-13.out index d68232f5433..734cbbc7e47 100644 --- a/tsl/test/expected/cagg_query-13.out +++ b/tsl/test/expected/cagg_query-13.out @@ -802,7 +802,8 @@ SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 ON (m1.location = m2.location AND m1.timec = m2.timec) -ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST +ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, + lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST LIMIT 10; location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp ----------+------------------------------+------+------+--------+-------+---------+--------- @@ -812,8 +813,8 @@ LIMIT 10; por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 | | | | 10 | | 20 | 10 | | | | 30 | 50 | 85 | 45 - | | | | 45 | 45 | 65 | 55 | | | | 45 | 30 | 65 | 45 + | | | | 45 | 45 | 65 | 55 | | | | | | | (9 rows) diff --git a/tsl/test/expected/cagg_query-14.out b/tsl/test/expected/cagg_query-14.out index d68232f5433..734cbbc7e47 100644 --- a/tsl/test/expected/cagg_query-14.out +++ b/tsl/test/expected/cagg_query-14.out @@ -802,7 +802,8 @@ SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 ON (m1.location = m2.location AND m1.timec = m2.timec) -ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST +ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, + lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST LIMIT 10; location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp ----------+------------------------------+------+------+--------+-------+---------+--------- @@ -812,8 +813,8 @@ LIMIT 10; por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 | | | | 10 | | 20 | 10 | | | | 30 | 50 | 85 | 45 - | | | | 45 | 45 | 65 | 55 | | | | 45 | 30 | 65 | 45 + | | | | 45 | 45 | 65 | 55 | | | | | | | (9 rows) diff --git a/tsl/test/expected/cagg_query-15.out b/tsl/test/expected/cagg_query-15.out index a205ef90a34..7b1e7dd6b6f 100644 --- a/tsl/test/expected/cagg_query-15.out +++ b/tsl/test/expected/cagg_query-15.out @@ -830,7 +830,8 @@ SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 ON (m1.location = m2.location AND m1.timec = m2.timec) -ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST +ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, + lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST LIMIT 10; location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp ----------+------------------------------+------+------+--------+-------+---------+--------- @@ -840,8 +841,8 @@ LIMIT 10; por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 | | | | 10 | | 20 | 10 | | | | 30 | 50 | 85 | 45 - | | | | 45 | 45 | 65 | 55 | | | | 45 | 30 | 65 | 45 + | | | | 45 | 45 | 65 | 55 | | | | | | | (9 rows) diff --git a/tsl/test/expected/cagg_query-16.out b/tsl/test/expected/cagg_query-16.out new file mode 100644 index 00000000000..7b1e7dd6b6f --- /dev/null +++ 
b/tsl/test/expected/cagg_query-16.out @@ -0,0 +1,849 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set TEST_BASE_NAME cagg_query +SELECT + format('%s/results/%s_results_view.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW", + format('%s/results/%s_results_view_hashagg.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW_HASHAGG", + format('%s/results/%s_results_table.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_TABLE" +\gset +SELECT format('\! diff %s %s', :'TEST_RESULTS_VIEW', :'TEST_RESULTS_TABLE') as "DIFF_CMD", + format('\! diff %s %s', :'TEST_RESULTS_VIEW_HASHAGG', :'TEST_RESULTS_TABLE') as "DIFF_CMD2" +\gset +\set EXPLAIN 'EXPLAIN (VERBOSE, COSTS OFF)' +SET client_min_messages TO NOTICE; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions values ( '2018-01-01 09:20:00-08', 'SFO', 55, 45); +insert into conditions values ( '2018-01-02 09:30:00-08', 'por', 100, 100); +insert into conditions values ( '2018-01-02 09:20:00-08', 'SFO', 65, 45); +insert into conditions values ( '2018-01-02 09:10:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-11-01 09:20:00-08', 'NYC', 45, 30); +insert into conditions values ( '2018-11-01 10:40:00-08', 'NYC', 55, 35); +insert into conditions values ( '2018-11-01 11:50:00-08', 'NYC', 65, 40); +insert into conditions values ( '2018-11-01 12:10:00-08', 'NYC', 75, 45); +insert into conditions values ( '2018-11-01 13:10:00-08', 'NYC', 85, 50); +insert into conditions values ( '2018-11-02 09:20:00-08', 'NYC', 10, 10); +insert into conditions values ( '2018-11-02 10:30:00-08', 'NYC', 20, 15); +insert into conditions values ( '2018-11-02 11:40:00-08', 'NYC', null, null); +insert into conditions values ( '2018-11-03 09:50:00-08', 'NYC', null, null); +create table location_tab( locid integer, locname text ); +insert into location_tab values( 1, 'SFO'); +insert into location_tab values( 2, 'NYC'); +insert into location_tab values( 3, 'por'); +create materialized view mat_m1( location, timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec), location WITH NO DATA; +--compute time_bucketted max+bucket_width for the materialized view +SELECT time_bucket('1day' , q.timeval+ '1day'::interval) +FROM ( select max(timec)as timeval from conditions ) as q; + time_bucket +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +CALL refresh_continuous_aggregate('mat_m1', NULL, NULL); +--test first/last +create materialized view mat_m2(location, timec, firsth, lasth, maxtemp, mintemp) +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, time_bucket('1day', timec), first(humidity, timec), last(humidity, timec), max(temperature), min(temperature) +from conditions +group by time_bucket('1day', timec), location WITH NO DATA; +--time that refresh assumes as now() for repeatability +SELECT time_bucket('1day' , q.timeval+ '1day'::interval) +FROM ( select max(timec)as timeval from conditions ) as q; + 
time_bucket +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +CALL refresh_continuous_aggregate('mat_m2', NULL, NULL); +--normal view -- +create or replace view regview( location, timec, minl, sumt , sumh) +as +select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by location, time_bucket('1day', timec); +set enable_hashagg = false; +-- NO pushdown cases --- +--when we have addl. attrs in order by that are not in the +-- group by, we will still need a sort +:EXPLAIN +select * from mat_m1 order by sumh, sumt, minl, timec ; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Sort Key: _materialized_hypertable_2.sumh, _materialized_hypertable_2.sumt, _materialized_hypertable_2.minl, _materialized_hypertable_2.timec + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), min(conditions.location), sum(conditions.temperature), sum(conditions.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.temperature, conditions.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.temperature, conditions.humidity + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.temperature, conditions.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 1 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= 
COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) +(31 rows) + +:EXPLAIN +select * from regview order by timec desc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (min(_hyper_1_1_chunk.location)), (sum(_hyper_1_1_chunk.temperature)), (sum(_hyper_1_1_chunk.humidity)) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) DESC + -> GroupAggregate + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), min(_hyper_1_1_chunk.location), sum(_hyper_1_1_chunk.temperature), sum(_hyper_1_1_chunk.humidity) + Group Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + Sort Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Result + Output: _hyper_1_1_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + -> Append + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.location, _hyper_1_1_chunk.timec, _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity +(16 rows) + +-- PUSHDOWN cases -- +-- all group by elts in order by , reorder group by elts to match +-- group by order +-- This should prevent an additional sort after GroupAggregate +:EXPLAIN +select * from mat_m1 order by timec desc, location; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Sort Key: _materialized_hypertable_2.timec DESC, _materialized_hypertable_2.location + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, 
_hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), min(conditions.location), sum(conditions.temperature), sum(conditions.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.temperature, conditions.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.temperature, conditions.humidity + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.temperature, conditions.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 1 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) +(31 rows) + +:EXPLAIN +select * from mat_m1 order by location, timec desc; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Sort Key: _materialized_hypertable_2.location, _materialized_hypertable_2.timec DESC + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), min(conditions.location), sum(conditions.temperature), sum(conditions.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 
day'::interval, conditions.timec)), conditions.temperature, conditions.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.temperature, conditions.humidity + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.temperature, conditions.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 1 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) +(31 rows) + +:EXPLAIN +select * from mat_m1 order by location, timec asc; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Sort Key: _materialized_hypertable_2.location, _materialized_hypertable_2.timec + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), min(conditions.location), sum(conditions.temperature), sum(conditions.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.temperature, conditions.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.temperature, conditions.humidity + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.temperature, conditions.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 1 
+ -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) +(31 rows) + +:EXPLAIN +select * from mat_m1 where timec > '2018-10-01' order by timec desc; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Sort Key: _materialized_hypertable_2.timec DESC + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_2_4_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), min(conditions.location), sum(conditions.temperature), sum(conditions.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.temperature, conditions.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.temperature, conditions.humidity + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.temperature, conditions.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: ((_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(29 rows) + +-- outer sort is used by mat_m1 for grouping. 
But doesn't avoid a sort after the join --- +:EXPLAIN +select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: l.locid, _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Sort Key: _materialized_hypertable_2.timec DESC + -> Hash Join + Output: l.locid, _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Hash Cond: (l.locname = _materialized_hypertable_2.location) + -> Seq Scan on public.location_tab l + Output: l.locid, l.locname + -> Hash + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_2_4_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), min(conditions.location), sum(conditions.temperature), sum(conditions.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.temperature, conditions.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.temperature, conditions.humidity + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.temperature, conditions.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: ((_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, 
_hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(36 rows) + +:EXPLAIN +select * from mat_m2 where timec > '2018-10-01' order by timec desc; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Sort Key: _materialized_hypertable_3.timec DESC + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_3 + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), first(conditions.humidity, conditions.timec), last(conditions.humidity, conditions.timec), max(conditions.temperature), min(conditions.temperature) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.humidity, conditions.timec, conditions.temperature + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.humidity, conditions.timec, conditions.temperature + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.humidity, conditions.temperature + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(29 rows) + +:EXPLAIN +select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc ) as q limit 1; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + -> Sort + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Sort Key: _materialized_hypertable_3.timec DESC + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_3 + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), first(conditions.humidity, conditions.timec), last(conditions.humidity, conditions.timec), max(conditions.temperature), min(conditions.temperature) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.humidity, conditions.timec, conditions.temperature + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.humidity, conditions.timec, conditions.temperature + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.humidity, conditions.temperature + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(31 rows) + +:EXPLAIN +select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc , location asc nulls first) as q limit 1; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + -> Sort + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Sort Key: _materialized_hypertable_3.timec DESC, _materialized_hypertable_3.location NULLS FIRST + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_3 + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), first(conditions.humidity, conditions.timec), last(conditions.humidity, conditions.timec), max(conditions.temperature), min(conditions.temperature) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.humidity, conditions.timec, conditions.temperature + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.humidity, conditions.timec, conditions.temperature + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.humidity, conditions.temperature + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(31 rows) + +--plans with CTE +:EXPLAIN +with m1 as ( +Select * from mat_m2 where timec > '2018-10-01' order by timec desc ) +select * from m1; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Sort Key: _materialized_hypertable_3.timec DESC + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_3 + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), first(conditions.humidity, conditions.timec), last(conditions.humidity, conditions.timec), max(conditions.temperature), min(conditions.temperature) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.humidity, conditions.timec, conditions.temperature + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.humidity, conditions.timec, conditions.temperature + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.humidity, conditions.temperature + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(29 rows) + +-- should reorder mat_m1 group by only based on mat_m1 order-by +:EXPLAIN +select * from mat_m1, mat_m2 where mat_m1.timec > '2018-10-01' and mat_m1.timec = mat_m2.timec order by mat_m1.timec desc; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh, _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Sort Key: _materialized_hypertable_2.timec DESC + -> Hash Join + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh, _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Hash Cond: (_materialized_hypertable_3.timec = _materialized_hypertable_2.timec) + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_3 + Output: _materialized_hypertable_3.location, _materialized_hypertable_3.timec, _materialized_hypertable_3.firsth, _materialized_hypertable_3.lasth, _materialized_hypertable_3.maxtemp, _materialized_hypertable_3.mintemp + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_3_5_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_5_chunk + Output: _hyper_3_5_chunk.location, _hyper_3_5_chunk.timec, _hyper_3_5_chunk.firsth, _hyper_3_5_chunk.lasth, _hyper_3_5_chunk.maxtemp, _hyper_3_5_chunk.mintemp + Index Cond: (_hyper_3_5_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: (_hyper_3_6_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), first(conditions.humidity, conditions.timec), last(conditions.humidity, conditions.timec), max(conditions.temperature), min(conditions.temperature) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.humidity, conditions.timec, conditions.temperature + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.humidity, conditions.timec, conditions.temperature + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.humidity, conditions.temperature + Startup Exclusion: true + 
Runtime Exclusion: false + Chunks excluded during startup: 1 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: (_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)) + -> Hash + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_2_4_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions_1.location, (time_bucket('@ 1 day'::interval, conditions_1.timec)), min(conditions_1.location), sum(conditions_1.temperature), sum(conditions_1.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions_1.timec)), conditions_1.location + -> Sort + Output: conditions_1.location, (time_bucket('@ 1 day'::interval, conditions_1.timec)), conditions_1.temperature, conditions_1.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions_1.timec)), conditions_1.location + -> Result + Output: conditions_1.location, time_bucket('@ 1 day'::interval, conditions_1.timec), conditions_1.temperature, conditions_1.humidity + -> Custom Scan (ChunkAppend) on public.conditions conditions_1 + Output: conditions_1.location, conditions_1.timec, conditions_1.temperature, conditions_1.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk _hyper_1_2_chunk_1 + Output: _hyper_1_2_chunk_1.location, _hyper_1_2_chunk_1.timec, _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + Index Cond: ((_hyper_1_2_chunk_1.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk_1.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(62 rows) + +--should reorder only for mat_m1. 
+:EXPLAIN +select * from mat_m1, regview where mat_m1.timec > '2018-10-01' and mat_m1.timec = regview.timec order by mat_m1.timec desc; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh, _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (min(_hyper_1_1_chunk.location)), (sum(_hyper_1_1_chunk.temperature)), (sum(_hyper_1_1_chunk.humidity)) + Sort Key: _materialized_hypertable_2.timec DESC + -> Hash Join + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh, _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (min(_hyper_1_1_chunk.location)), (sum(_hyper_1_1_chunk.temperature)), (sum(_hyper_1_1_chunk.humidity)) + Hash Cond: ((time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) = _materialized_hypertable_2.timec) + -> GroupAggregate + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), min(_hyper_1_1_chunk.location), sum(_hyper_1_1_chunk.temperature), sum(_hyper_1_1_chunk.humidity) + Group Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + Sort Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Result + Output: _hyper_1_1_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + -> Append + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.location, _hyper_1_1_chunk.timec, _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Hash + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_2_4_chunk.timec > 'Mon Oct 
01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: conditions_1.location, (time_bucket('@ 1 day'::interval, conditions_1.timec)), min(conditions_1.location), sum(conditions_1.temperature), sum(conditions_1.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions_1.timec)), conditions_1.location + -> Sort + Output: conditions_1.location, (time_bucket('@ 1 day'::interval, conditions_1.timec)), conditions_1.temperature, conditions_1.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions_1.timec)), conditions_1.location + -> Result + Output: conditions_1.location, time_bucket('@ 1 day'::interval, conditions_1.timec), conditions_1.temperature, conditions_1.humidity + -> Custom Scan (ChunkAppend) on public.conditions conditions_1 + Output: conditions_1.location, conditions_1.timec, conditions_1.temperature, conditions_1.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk _hyper_1_2_chunk_1 + Output: _hyper_1_2_chunk_1.location, _hyper_1_2_chunk_1.timec, _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + Index Cond: ((_hyper_1_2_chunk_1.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) AND (_hyper_1_2_chunk_1.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(47 rows) + +select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc; + locid | location | timec | minl | sumt | sumh +-------+----------+------------------------------+------+------+------ + 2 | NYC | Fri Nov 02 17:00:00 2018 PDT | NYC | | + 2 | NYC | Thu Nov 01 17:00:00 2018 PDT | NYC | 30 | 25 + 2 | NYC | Wed Oct 31 17:00:00 2018 PDT | NYC | 325 | 200 +(3 rows) + +\set ECHO none +---- Run the same queries with hash agg enabled now +set enable_hashagg = true; +\set ECHO none +--- Run the queries directly on the table now +set enable_hashagg = true; +\set ECHO none +-- diff results view select and table select +:DIFF_CMD +:DIFF_CMD2 +--check if the guc works , reordering will not work +set timescaledb.enable_cagg_reorder_groupby = false; +set enable_hashagg = false; +:EXPLAIN +select * from mat_m1 order by timec desc, location; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Sort Key: _materialized_hypertable_2.timec DESC, _materialized_hypertable_2.location + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_2 + Output: _materialized_hypertable_2.location, _materialized_hypertable_2.timec, _materialized_hypertable_2.minl, _materialized_hypertable_2.sumt, _materialized_hypertable_2.sumh + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, 
_hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) + -> GroupAggregate + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), min(conditions.location), sum(conditions.temperature), sum(conditions.humidity) + Group Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Sort + Output: conditions.location, (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.temperature, conditions.humidity + Sort Key: (time_bucket('@ 1 day'::interval, conditions.timec)), conditions.location + -> Result + Output: conditions.location, time_bucket('@ 1 day'::interval, conditions.timec), conditions.temperature, conditions.humidity + -> Custom Scan (ChunkAppend) on public.conditions + Output: conditions.location, conditions.timec, conditions.temperature, conditions.humidity + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 1 + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)) +(31 rows) + +----------------------------------------------------------------------- +-- Test the cagg_watermark function. The watermark gives the point +-- where to UNION raw and materialized data in real-time +-- aggregation. Specifically, test that the watermark caching works as +-- expected. +----------------------------------------------------------------------- +-- Insert some more data so that there is something to UNION in +-- real-time aggregation. +insert into conditions values ( '2018-12-02 20:10:00-08', 'SFO', 55, 45); +insert into conditions values ( '2018-12-02 21:20:00-08', 'SFO', 65, 45); +insert into conditions values ( '2018-12-02 20:30:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-12-02 21:50:00-08', 'NYC', 45, 30); +-- Test join of two caggs. Joining two caggs will force the cache to +-- reset every time the watermark function is invoked on a different +-- cagg in the same query. 
+SELECT mat_hypertable_id AS mat_id, + raw_hypertable_id AS raw_id, + schema_name AS mat_schema, + table_name AS mat_name, + format('%I.%I', schema_name, table_name) AS mat_table +FROM _timescaledb_catalog.continuous_agg ca, _timescaledb_catalog.hypertable h +WHERE user_view_name='mat_m1' +AND h.id = ca.mat_hypertable_id \gset +BEGIN; +-- Query without join +SELECT m1.location, m1.timec, sumt, sumh +FROM mat_m1 m1 +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + location | timec | sumt | sumh +----------+------------------------------+------+------ + NYC | Sun Dec 02 16:00:00 2018 PST | 110 | 75 + NYC | Fri Nov 02 17:00:00 2018 PDT | | + NYC | Thu Nov 01 17:00:00 2018 PDT | 30 | 25 + NYC | Wed Oct 31 17:00:00 2018 PDT | 325 | 200 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 + SFO | Sun Dec 02 16:00:00 2018 PST | 120 | 90 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 +(9 rows) + +-- Query that joins two caggs. This should force the watermark cache +-- to reset when the materialized hypertable ID changes. A hash join +-- could potentially read all values from mat_m1 then all values from +-- mat_m2. This would be the optimal situation for cagg_watermark +-- caching. We want to avoid it in tests to see that caching doesn't +-- do anything wrong in worse situations (e.g., a nested loop join). +SET enable_hashjoin=false; +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp +----------+------------------------------+------+------+--------+-------+---------+--------- + NYC | Sun Dec 02 16:00:00 2018 PST | 110 | 75 | 45 | 30 | 65 | 45 + NYC | Fri Nov 02 17:00:00 2018 PDT | | | | | | + NYC | Thu Nov 01 17:00:00 2018 PDT | 30 | 25 | 10 | | 20 | 10 + NYC | Wed Oct 31 17:00:00 2018 PDT | 325 | 200 | 30 | 50 | 85 | 45 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 02 16:00:00 2018 PST | 120 | 90 | 45 | 45 | 65 | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 | 45 | 45 | 55 | 55 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 +(9 rows) + +-- Show the current watermark +SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); + to_timestamp +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +-- The watermark should, in this case, be the same as the invalidation +-- threshold +SELECT _timescaledb_functions.to_timestamp(watermark) +FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :raw_id; + to_timestamp +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +-- The watermark is the end of materialization (end of last bucket) +-- while the MAX is the start of the last bucket +SELECT max(timec) FROM :mat_table; + max +------------------------------ + Fri Nov 02 17:00:00 2018 PDT +(1 row) + +-- Drop the most recent chunk +SELECT chunk_name, range_start, range_end +FROM timescaledb_information.chunks +WHERE hypertable_name = :'mat_name'; + chunk_name | range_start | range_end +------------------+------------------------------+------------------------------ + _hyper_2_3_chunk | Wed Nov 29 16:00:00 2017 PST | Wed Feb 07 
16:00:00 2018 PST + _hyper_2_4_chunk | Wed Sep 05 17:00:00 2018 PDT | Wed Nov 14 16:00:00 2018 PST +(2 rows) + +SELECT drop_chunks('mat_m1', newer_than=>'2018-01-01'::timestamptz); + drop_chunks +---------------------------------------- + _timescaledb_internal._hyper_2_4_chunk +(1 row) + +SELECT chunk_name, range_start, range_end +FROM timescaledb_information.chunks +WHERE hypertable_name = :'mat_name'; + chunk_name | range_start | range_end +------------------+------------------------------+------------------------------ + _hyper_2_3_chunk | Wed Nov 29 16:00:00 2017 PST | Wed Feb 07 16:00:00 2018 PST +(1 row) + +-- The watermark should be updated to reflect the dropped data (i.e., +-- the cache should be reset) +SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); + to_timestamp +------------------------------ + Tue Jan 02 16:00:00 2018 PST +(1 row) + +-- Since we removed the last chunk, the invalidation threshold doesn't +-- move back, while the watermark does. +SELECT _timescaledb_functions.to_timestamp(watermark) +FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :raw_id; + to_timestamp +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +-- Compare the new watermark to the MAX time in the table +SELECT max(timec) FROM :mat_table; + max +------------------------------ + Mon Jan 01 16:00:00 2018 PST +(1 row) + +-- Try a subtransaction +SAVEPOINT clear_cagg; +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp +----------+------------------------------+------+------+--------+-------+---------+--------- + NYC | Sun Dec 02 16:00:00 2018 PST | 110 | 75 | 45 | 30 | 65 | 45 + NYC | Fri Nov 02 17:00:00 2018 PDT | | | | | | + NYC | Thu Nov 01 17:00:00 2018 PDT | 30 | 25 | 10 | | 20 | 10 + NYC | Wed Oct 31 17:00:00 2018 PDT | 325 | 200 | 30 | 50 | 85 | 45 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 02 16:00:00 2018 PST | 120 | 90 | 45 | 45 | 65 | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 | 45 | 45 | 55 | 55 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 +(9 rows) + +ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, + lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST +LIMIT 10; + location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp +----------+------------------------------+------+------+--------+-------+---------+--------- + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 | 45 | 45 | 55 | 55 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 + | | | | 10 | | 20 | 10 + | | | | 30 | 50 | 85 | 45 + | | | | 45 | 30 | 65 | 45 + | | | | 45 | 45 | 65 | 55 + | | | | | | | +(9 rows) + +ROLLBACK; diff --git a/tsl/test/expected/cagg_repair.out b/tsl/test/expected/cagg_repair-13.out 
similarity index 100% rename from tsl/test/expected/cagg_repair.out rename to tsl/test/expected/cagg_repair-13.out diff --git a/tsl/test/expected/cagg_repair-14.out b/tsl/test/expected/cagg_repair-14.out new file mode 100644 index 00000000000..6143f9ef96d --- /dev/null +++ b/tsl/test/expected/cagg_repair-14.out @@ -0,0 +1,355 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE PROCEDURE _timescaledb_internal.cagg_try_repair ( + cagg_view REGCLASS, + force_rebuild BOOLEAN +) AS :MODULE_PATHNAME, 'ts_cagg_try_repair' LANGUAGE C SET client_min_messages TO DEBUG1; +CREATE TABLE conditions ( + "time" TIMESTAMPTZ NOT NULL, + city TEXT NOT NULL, + temperature INTEGER NOT NULL, + device_id INTEGER NOT NULL +); +SELECT table_name FROM create_hypertable('conditions', 'time'); + table_name +------------ + conditions +(1 row) + +INSERT INTO + conditions ("time", city, temperature, device_id) +VALUES + ('2021-06-14 00:00:00-00', 'Moscow', 26,1), + ('2021-06-15 00:00:00-00', 'Berlin', 22,2), + ('2021-06-16 00:00:00-00', 'Stockholm', 24,3), + ('2021-06-17 00:00:00-00', 'London', 24,4), + ('2021-06-18 00:00:00-00', 'London', 27,4), + ('2021-06-19 00:00:00-00', 'Moscow', 28,4), + ('2021-06-20 00:00:00-00', 'Moscow', 30,1), + ('2021-06-21 00:00:00-00', 'Berlin', 31,1), + ('2021-06-22 00:00:00-00', 'Stockholm', 34,1), + ('2021-06-23 00:00:00-00', 'Stockholm', 34,2), + ('2021-06-24 00:00:00-00', 'Moscow', 34,2), + ('2021-06-25 00:00:00-00', 'London', 32,3), + ('2021-06-26 00:00:00-00', 'Moscow', 32,3), + ('2021-06-27 00:00:00-00', 'Moscow', 31,3); +CREATE TABLE devices ( + id INTEGER NOT NULL, + name TEXT, + location TEXT +); +INSERT INTO + devices (id, name, location) +VALUES + (1, 'thermo_1', 'Moscow'), + (2, 'thermo_2', 'Berlin'), + (3, 'thermo_3', 'London'), + (4, 'thermo_4', 'Stockholm'); +CREATE MATERIALIZED VIEW conditions_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + devices.name AS device_name, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions + JOIN devices ON devices.id = conditions.device_id +GROUP BY + 1, 2 +WITH NO DATA; +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +CALL refresh_continuous_aggregate('conditions_summary', NULL, '2021-06-22 00:00:00-00'); +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 
rows) + +-- Execute repair for materialized only cagg +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary does not have partials, do not check for defects! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary has been rebuilt! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +-- Switch to realtime cagg +ALTER MATERIALIZED VIEW conditions_summary SET (timescaledb.materialized_only=false); +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < 
COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +-- Execute repair for realtime cagg +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary does not have partials, do not check for defects! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', TRUE); +DEBUG: 
[cagg_rebuild_view_definition] public.conditions_summary has been rebuilt! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +-- Tests without join +CREATE MATERIALIZED VIEW conditions_summary_nojoin +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions +GROUP BY + 1 +WITH NO DATA; +CALL _timescaledb_internal.cagg_try_repair('conditions_summary_nojoin', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_nojoin does not have JOINS, so no need to rebuild the definition! 
+\d+ conditions_summary_nojoin + View "public.conditions_summary_nojoin" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_3.bucket, + _materialized_hypertable_3.min, + _materialized_hypertable_3.max, + _materialized_hypertable_3.sum + FROM _timescaledb_internal._materialized_hypertable_3 + WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); + +-- Tests with old cagg format +CREATE MATERIALIZED VIEW conditions_summary_old_format +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions +GROUP BY + 1 +WITH NO DATA; +-- Should rebuild without forcing +CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_old_format has been rebuilt! 
+\d+ conditions_summary_old_format + View "public.conditions_summary_old_format" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_4.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::integer) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::integer) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::bigint) AS sum + FROM _timescaledb_internal._materialized_hypertable_4 + WHERE _materialized_hypertable_4.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) + GROUP BY _materialized_hypertable_4.bucket +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); + +DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/expected/cagg_repair-15.out b/tsl/test/expected/cagg_repair-15.out new file mode 100644 index 00000000000..6143f9ef96d --- /dev/null +++ b/tsl/test/expected/cagg_repair-15.out @@ -0,0 +1,355 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE PROCEDURE _timescaledb_internal.cagg_try_repair ( + cagg_view REGCLASS, + force_rebuild BOOLEAN +) AS :MODULE_PATHNAME, 'ts_cagg_try_repair' LANGUAGE C SET client_min_messages TO DEBUG1; +CREATE TABLE conditions ( + "time" TIMESTAMPTZ NOT NULL, + city TEXT NOT NULL, + temperature INTEGER NOT NULL, + device_id INTEGER NOT NULL +); +SELECT table_name FROM create_hypertable('conditions', 'time'); + table_name +------------ + conditions +(1 row) + +INSERT INTO + conditions ("time", city, temperature, device_id) +VALUES + ('2021-06-14 00:00:00-00', 'Moscow', 26,1), + ('2021-06-15 00:00:00-00', 'Berlin', 22,2), + ('2021-06-16 00:00:00-00', 'Stockholm', 24,3), + ('2021-06-17 00:00:00-00', 'London', 24,4), + ('2021-06-18 00:00:00-00', 'London', 27,4), + ('2021-06-19 00:00:00-00', 'Moscow', 28,4), + ('2021-06-20 00:00:00-00', 'Moscow', 30,1), + ('2021-06-21 00:00:00-00', 'Berlin', 31,1), + ('2021-06-22 00:00:00-00', 'Stockholm', 34,1), + ('2021-06-23 00:00:00-00', 'Stockholm', 34,2), + ('2021-06-24 00:00:00-00', 'Moscow', 34,2), + ('2021-06-25 00:00:00-00', 'London', 32,3), + ('2021-06-26 00:00:00-00', 'Moscow', 32,3), + ('2021-06-27 00:00:00-00', 'Moscow', 31,3); +CREATE TABLE devices ( + id INTEGER NOT NULL, + name TEXT, + location TEXT +); +INSERT INTO + devices (id, name, location) +VALUES + (1, 'thermo_1', 'Moscow'), + (2, 'thermo_2', 'Berlin'), + (3, 'thermo_3', 'London'), + (4, 'thermo_4', 'Stockholm'); +CREATE MATERIALIZED VIEW conditions_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + devices.name AS device_name, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions + JOIN devices ON devices.id = conditions.device_id +GROUP BY + 1, 2 +WITH NO DATA; +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +CALL refresh_continuous_aggregate('conditions_summary', NULL, '2021-06-22 00:00:00-00'); +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +-- Execute repair for materialized only cagg +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary does not have partials, do not check for defects! 
+\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary has been rebuilt! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +-- Switch to realtime cagg +ALTER MATERIALIZED VIEW conditions_summary SET (timescaledb.materialized_only=false); +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + 
max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +-- Execute repair for realtime cagg +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary does not have partials, do not check for defects! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary has been rebuilt! 
+\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +-- Tests without join +CREATE MATERIALIZED VIEW conditions_summary_nojoin +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions +GROUP BY + 1 +WITH NO DATA; +CALL _timescaledb_internal.cagg_try_repair('conditions_summary_nojoin', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_nojoin does not have JOINS, so no need to rebuild the definition! 
+\d+ conditions_summary_nojoin + View "public.conditions_summary_nojoin" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_3.bucket, + _materialized_hypertable_3.min, + _materialized_hypertable_3.max, + _materialized_hypertable_3.sum + FROM _timescaledb_internal._materialized_hypertable_3 + WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); + +-- Tests with old cagg format +CREATE MATERIALIZED VIEW conditions_summary_old_format +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions +GROUP BY + 1 +WITH NO DATA; +-- Should rebuild without forcing +CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_old_format has been rebuilt! 
+\d+ conditions_summary_old_format + View "public.conditions_summary_old_format" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_4.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::integer) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::integer) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::bigint) AS sum + FROM _timescaledb_internal._materialized_hypertable_4 + WHERE _materialized_hypertable_4.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) + GROUP BY _materialized_hypertable_4.bucket +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); + +DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/expected/cagg_repair-16.out b/tsl/test/expected/cagg_repair-16.out new file mode 100644 index 00000000000..bd9c628773b --- /dev/null +++ b/tsl/test/expected/cagg_repair-16.out @@ -0,0 +1,355 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE PROCEDURE _timescaledb_internal.cagg_try_repair ( + cagg_view REGCLASS, + force_rebuild BOOLEAN +) AS :MODULE_PATHNAME, 'ts_cagg_try_repair' LANGUAGE C SET client_min_messages TO DEBUG1; +CREATE TABLE conditions ( + "time" TIMESTAMPTZ NOT NULL, + city TEXT NOT NULL, + temperature INTEGER NOT NULL, + device_id INTEGER NOT NULL +); +SELECT table_name FROM create_hypertable('conditions', 'time'); + table_name +------------ + conditions +(1 row) + +INSERT INTO + conditions ("time", city, temperature, device_id) +VALUES + ('2021-06-14 00:00:00-00', 'Moscow', 26,1), + ('2021-06-15 00:00:00-00', 'Berlin', 22,2), + ('2021-06-16 00:00:00-00', 'Stockholm', 24,3), + ('2021-06-17 00:00:00-00', 'London', 24,4), + ('2021-06-18 00:00:00-00', 'London', 27,4), + ('2021-06-19 00:00:00-00', 'Moscow', 28,4), + ('2021-06-20 00:00:00-00', 'Moscow', 30,1), + ('2021-06-21 00:00:00-00', 'Berlin', 31,1), + ('2021-06-22 00:00:00-00', 'Stockholm', 34,1), + ('2021-06-23 00:00:00-00', 'Stockholm', 34,2), + ('2021-06-24 00:00:00-00', 'Moscow', 34,2), + ('2021-06-25 00:00:00-00', 'London', 32,3), + ('2021-06-26 00:00:00-00', 'Moscow', 32,3), + ('2021-06-27 00:00:00-00', 'Moscow', 31,3); +CREATE TABLE devices ( + id INTEGER NOT NULL, + name TEXT, + location TEXT +); +INSERT INTO + devices (id, name, location) +VALUES + (1, 'thermo_1', 'Moscow'), + (2, 'thermo_2', 'Berlin'), + (3, 'thermo_3', 'London'), + (4, 'thermo_4', 'Stockholm'); +CREATE MATERIALIZED VIEW conditions_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + devices.name AS device_name, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions + JOIN devices ON devices.id = conditions.device_id +GROUP BY + 1, 2 +WITH NO DATA; +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT bucket, + device_name, + min, + max, + sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +CALL refresh_continuous_aggregate('conditions_summary', NULL, '2021-06-22 00:00:00-00'); +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +-- Execute repair for materialized only cagg +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary does not have partials, do not check for defects! 
+\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT bucket, + device_name, + min, + max, + sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary has been rebuilt! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT bucket, + device_name, + min, + max, + sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 +(4 rows) + +-- Switch to realtime cagg +ALTER MATERIALIZED VIEW conditions_summary SET (timescaledb.materialized_only=false); +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp 
with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +-- Execute repair for realtime cagg +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary does not have partials, do not check for defects! +\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +CALL _timescaledb_internal.cagg_try_repair('conditions_summary', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary has been rebuilt! 
+\d+ conditions_summary + View "public.conditions_summary" + Column | Type | Collation | Nullable | Default | Storage | Description +-------------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + device_name | text | | | | extended | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_2.bucket, + _materialized_hypertable_2.device_name, + _materialized_hypertable_2.min, + _materialized_hypertable_2.max, + _materialized_hypertable_2.sum + FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + devices.name AS device_name, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + JOIN devices ON devices.id = conditions.device_id + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")), devices.name; + +SELECT * FROM conditions_summary ORDER BY bucket, device_name; + bucket | device_name | min | max | sum +------------------------------+-------------+-----+-----+----- + Sun Jun 13 17:00:00 2021 PDT | thermo_1 | 26 | 30 | 56 + Sun Jun 13 17:00:00 2021 PDT | thermo_2 | 22 | 22 | 22 + Sun Jun 13 17:00:00 2021 PDT | thermo_3 | 24 | 24 | 24 + Sun Jun 13 17:00:00 2021 PDT | thermo_4 | 24 | 28 | 79 + Sun Jun 20 17:00:00 2021 PDT | thermo_1 | 31 | 34 | 65 + Sun Jun 20 17:00:00 2021 PDT | thermo_2 | 34 | 34 | 68 + Sun Jun 20 17:00:00 2021 PDT | thermo_3 | 31 | 32 | 95 +(7 rows) + +-- Tests without join +CREATE MATERIALIZED VIEW conditions_summary_nojoin +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions +GROUP BY + 1 +WITH NO DATA; +CALL _timescaledb_internal.cagg_try_repair('conditions_summary_nojoin', TRUE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_nojoin does not have JOINS, so no need to rebuild the definition! 
+\d+ conditions_summary_nojoin + View "public.conditions_summary_nojoin" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_3.bucket, + _materialized_hypertable_3.min, + _materialized_hypertable_3.max, + _materialized_hypertable_3.sum + FROM _timescaledb_internal._materialized_hypertable_3 + WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); + +-- Tests with old cagg format +CREATE MATERIALIZED VIEW conditions_summary_old_format +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket(INTERVAL '1 week', "time") AS bucket, + MIN(temperature), + MAX(temperature), + SUM(temperature) +FROM + conditions +GROUP BY + 1 +WITH NO DATA; +-- Should rebuild without forcing +CALL _timescaledb_internal.cagg_try_repair('conditions_summary_old_format', FALSE); +DEBUG: [cagg_rebuild_view_definition] public.conditions_summary_old_format has been rebuilt! 
+\d+ conditions_summary_old_format + View "public.conditions_summary_old_format" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + min | integer | | | | plain | + max | integer | | | | plain | + sum | bigint | | | | plain | +View definition: + SELECT _materialized_hypertable_4.bucket, + _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_2_2, NULL::integer) AS min, + _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_3_3, NULL::integer) AS max, + _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], _materialized_hypertable_4.agg_4_4, NULL::bigint) AS sum + FROM _timescaledb_internal._materialized_hypertable_4 + WHERE _materialized_hypertable_4.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) + GROUP BY _materialized_hypertable_4.bucket +UNION ALL + SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, + min(conditions.temperature) AS min, + max(conditions.temperature) AS max, + sum(conditions.temperature) AS sum + FROM conditions + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone) + GROUP BY (time_bucket('@ 7 days'::interval, conditions."time")); + +DROP PROCEDURE _timescaledb_internal.cagg_try_repair (REGCLASS, BOOLEAN); diff --git a/tsl/test/expected/cagg_union_view-16.out b/tsl/test/expected/cagg_union_view-16.out new file mode 100644 index 00000000000..086bd21d24c --- /dev/null +++ b/tsl/test/expected/cagg_union_view-16.out @@ -0,0 +1,1023 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- disable background workers to make results reproducible +\c :TEST_DBNAME :ROLE_SUPERUSER +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +CREATE TABLE metrics(f1 int, f2 int, time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('metrics','time'); + create_hypertable +---------------------- + (1,public,metrics,t) +(1 row) + +ALTER TABLE metrics DROP COLUMN f1; +INSERT INTO metrics(time, device_id, value) SELECT '2000-01-01'::timestamptz, device_id, 0.5 FROM generate_series(1,3) g(device_id); +-- +-- test switching continuous agg view between different modes +-- +-- check default view for new continuous aggregate +CREATE MATERIALIZED VIEW metrics_summary + WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT time_bucket('1d',time), avg(value) FROM metrics GROUP BY 1 WITH NO DATA; +ALTER TABLE metrics DROP COLUMN f2; +-- this should be union view +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | f +(1 row) + +SELECT pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT _materialized_hypertable_2.time_bucket, + + _materialized_hypertable_2.avg + + FROM _timescaledb_internal._materialized_hypertable_2 + + WHERE _materialized_hypertable_2.time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)+ + UNION ALL + + SELECT time_bucket('@ 1 day'::interval, metrics."time") AS time_bucket, + + avg(metrics.value) AS avg + + FROM metrics + + WHERE metrics."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + + GROUP BY (time_bucket('@ 1 day'::interval, metrics."time")); +(1 row) + +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- downgrade view to non-union view +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=true); +-- this should be view without union +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | t +(1 row) + +SELECT pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +----------------------------------------------------------- + SELECT time_bucket, + + avg + + FROM _timescaledb_internal._materialized_hypertable_2; +(1 row) + +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +-------------+----- +(0 rows) + +-- upgrade view to union view again +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=false); +-- this should be union view +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | f +(1 row) + +SELECT 
pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT _materialized_hypertable_2.time_bucket, + + _materialized_hypertable_2.avg + + FROM _timescaledb_internal._materialized_hypertable_2 + + WHERE _materialized_hypertable_2.time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)+ + UNION ALL + + SELECT time_bucket('@ 1 day'::interval, metrics."time") AS time_bucket, + + avg(metrics.value) AS avg + + FROM metrics + + WHERE metrics."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + + GROUP BY (time_bucket('@ 1 day'::interval, metrics."time")); +(1 row) + +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- try upgrade view to union view that is already union view +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=false); +-- this should be union view +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | f +(1 row) + +SELECT pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT _materialized_hypertable_2.time_bucket, + + _materialized_hypertable_2.avg + + FROM _timescaledb_internal._materialized_hypertable_2 + + WHERE _materialized_hypertable_2.time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone)+ + UNION ALL + + SELECT time_bucket('@ 1 day'::interval, metrics."time") AS time_bucket, + + avg(metrics.value) AS avg + + FROM metrics + + WHERE metrics."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) + + GROUP BY (time_bucket('@ 1 day'::interval, metrics."time")); +(1 row) + +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- refresh +CALL refresh_continuous_aggregate('metrics_summary', NULL, NULL); +-- result should not change after refresh for union view +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- downgrade view to non-union view +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=true); +-- this should be view without union +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | t +(1 row) + +SELECT pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +----------------------------------------------------------- + SELECT time_bucket, + + avg + + FROM _timescaledb_internal._materialized_hypertable_2; +(1 row) + +-- view should have results now after refresh 
+SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +DROP MATERIALIZED VIEW metrics_summary; +NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +-- check default view for new continuous aggregate with materialized_only to true +CREATE MATERIALIZED VIEW metrics_summary + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS + SELECT time_bucket('1d',time), avg(value) FROM metrics GROUP BY 1 WITH NO DATA; +-- this should be view without union +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | t +(1 row) + +SELECT pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +----------------------------------------------------------- + SELECT time_bucket, + + avg + + FROM _timescaledb_internal._materialized_hypertable_3; +(1 row) + +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +-------------+----- +(0 rows) + +-- upgrade view to union view +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=false); +-- this should be union view +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | f +(1 row) + +SELECT pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT _materialized_hypertable_3.time_bucket, + + _materialized_hypertable_3.avg + + FROM _timescaledb_internal._materialized_hypertable_3 + + WHERE _materialized_hypertable_3.time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone)+ + UNION ALL + + SELECT time_bucket('@ 1 day'::interval, metrics."time") AS time_bucket, + + avg(metrics.value) AS avg + + FROM metrics + + WHERE metrics."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(3)), '-infinity'::timestamp with time zone) + + GROUP BY (time_bucket('@ 1 day'::interval, metrics."time")); +(1 row) + +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- downgrade view to non-union view +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=true); +-- this should be view without union +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | t +(1 row) + +SELECT pg_get_viewdef('metrics_summary',true); + pg_get_viewdef +----------------------------------------------------------- + SELECT time_bucket, + + avg + + FROM _timescaledb_internal._materialized_hypertable_3; +(1 row) + +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +-------------+----- +(0 rows) + +DROP MATERIALIZED VIEW metrics_summary; +-- +-- test queries on union view +-- +CREATE MATERIALIZED VIEW metrics_summary + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS + 
SELECT time_bucket('1d',time), avg(value) FROM metrics GROUP BY 1 WITH NO DATA; +-- should be marked as materialized_only in catalog +SELECT user_view_name, materialized_only FROM _timescaledb_catalog.continuous_agg WHERE user_view_name='metrics_summary'; + user_view_name | materialized_only +-----------------+------------------- + metrics_summary | t +(1 row) + +-- query should not have results since cagg is materialized only and no refresh has happened yet +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +-------------+----- +(0 rows) + +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=false); +-- after switch to union view all results should be returned +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +CALL refresh_continuous_aggregate('metrics_summary', NULL, NULL); +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=true); +-- materialized only view should return data now too because refresh has happened +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- add some more data +INSERT INTO metrics(time, device_id, value) SELECT '2000-02-01'::timestamptz, device_id, device_id/10.0 FROM generate_series(1,3) g(device_id); +-- materialized only view should not have new data yet +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- but union view should +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=false); +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 + Mon Jan 31 16:00:00 2000 PST | 0.2 +(2 rows) + +-- and after refresh non union view should have new data too +CALL refresh_continuous_aggregate('metrics_summary', NULL, '2000-01-30'); +ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.materialized_only=true); +SELECT time_bucket,avg FROM metrics_summary ORDER BY 1; + time_bucket | avg +------------------------------+----- + Fri Dec 31 16:00:00 1999 PST | 0.5 +(1 row) + +-- hardcoding now to 50 will lead to 30 watermark +CREATE OR REPLACE FUNCTION boundary_test_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +-- test watermark interaction with just in time aggregates +CREATE TABLE boundary_test(time int, value float); +SELECT create_hypertable('boundary_test','time',chunk_time_interval:=10); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (5,public,boundary_test,t) +(1 row) + +SELECT set_integer_now_func('boundary_test','boundary_test_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW boundary_view + WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT time_bucket(10,time), avg(value) FROM boundary_test GROUP BY 1 WITH NO DATA; +INSERT INTO boundary_test SELECT i, i*10 FROM generate_series(10,40,10) AS g(i); +SELECT mat_hypertable_id AS boundary_view_id +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'boundary_view' \gset +-- watermark should be NULL +SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); + cagg_watermark +---------------- + -2147483648 +(1 
row) + +-- first UNION child should have no rows because no materialization has happened yet and 2nd child should have 4 rows +:PREFIX SELECT * FROM boundary_view; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + HashAggregate (actual rows=4 loops=1) + Group Key: time_bucket(10, boundary_test."time") + Batches: 1 + -> Result (actual rows=4 loops=1) + -> Custom Scan (ChunkAppend) on boundary_test (actual rows=4 loops=1) + Chunks excluded during startup: 0 + -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= COALESCE((_timescaledb_functions.cagg_watermark(6))::integer, '-2147483648'::integer)) + -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= COALESCE((_timescaledb_functions.cagg_watermark(6))::integer, '-2147483648'::integer)) + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= COALESCE((_timescaledb_functions.cagg_watermark(6))::integer, '-2147483648'::integer)) + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= COALESCE((_timescaledb_functions.cagg_watermark(6))::integer, '-2147483648'::integer)) +(14 rows) + +-- result should have 4 rows +SELECT * FROM boundary_view ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +-- Refresh up to 30 to leave some unmaterialized data at the head and +-- thus have something to union. +CALL refresh_continuous_aggregate('boundary_view', NULL, 30); +-- watermark should be 30 +SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); + cagg_watermark +---------------- + 30 +(1 row) + +-- both sides of the UNION should return 2 rows +:PREFIX SELECT * FROM boundary_view; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=4 loops=1) + -> Custom Scan (ChunkAppend) on _materialized_hypertable_6 (actual rows=2 loops=1) + Chunks excluded during startup: 0 + -> Index Scan using _hyper_6_9_chunk__materialized_hypertable_6_time_bucket_idx on _hyper_6_9_chunk (actual rows=2 loops=1) + Index Cond: (time_bucket < COALESCE((_timescaledb_functions.cagg_watermark(6))::integer, '-2147483648'::integer)) + -> HashAggregate (actual rows=2 loops=1) + Group Key: time_bucket(10, boundary_test."time") + Batches: 1 + -> Result (actual rows=2 loops=1) + -> Custom Scan (ChunkAppend) on boundary_test (actual rows=2 loops=1) + Chunks excluded during startup: 2 + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= COALESCE((_timescaledb_functions.cagg_watermark(6))::integer, '-2147483648'::integer)) + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= COALESCE((_timescaledb_functions.cagg_watermark(6))::integer, '-2147483648'::integer)) +(15 rows) + +-- result should have 4 rows +SELECT * FROM boundary_view ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +---- TEST union view with WHERE, GROUP BY and HAVING 
clause ---- +create table ht_intdata (a integer, b integer, c integer); +select table_name FROM create_hypertable('ht_intdata', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" + table_name +------------ + ht_intdata +(1 row) + +INSERT into ht_intdata values( 3 , 16 , 20); +INSERT into ht_intdata values( 1 , 10 , 20); +INSERT into ht_intdata values( 1 , 11 , 20); +INSERT into ht_intdata values( 1 , 12 , 20); +INSERT into ht_intdata values( 1 , 13 , 20); +INSERT into ht_intdata values( 1 , 14 , 20); +INSERT into ht_intdata values( 2 , 14 , 20); +INSERT into ht_intdata values( 2 , 15 , 20); +INSERT into ht_intdata values( 2 , 16 , 20); +INSERT into ht_intdata values( 20 , 16 , 20); +INSERT into ht_intdata values( 20 , 26 , 20); +INSERT into ht_intdata values( 20 , 16 , 20); +INSERT into ht_intdata values( 21 , 15 , 30); +INSERT into ht_intdata values( 21 , 15 , 30); +INSERT into ht_intdata values( 21 , 15 , 30); +CREATE OR REPLACE FUNCTION integer_now_ht_intdata() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0) FROM ht_intdata $$; +SELECT set_integer_now_func('ht_intdata', 'integer_now_ht_intdata'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_m1(a, countb, sumbc, spreadcb, avgc) +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket(1, a), count(*), sum(b+c), max(c)-min(b), avg(c)::int +FROM ht_intdata +WHERE b < 16 +GROUP BY time_bucket(1, a) +HAVING sum(c) > 50 WITH NO DATA; +-- Do partial refresh to have something to union +CALL refresh_continuous_aggregate('mat_m1', NULL, 11); +--results from real time cont.agg and direct query should match +SELECT time_bucket(1, a), count(*), sum(b+c), max(c)-min(b), avg(c)::int +FROM ht_intdata +WHERE b < 16 +GROUP BY time_bucket(1, a) +HAVING sum(c) > 50 +ORDER BY 1; + time_bucket | count | sum | ?column? | avg +-------------+-------+-----+----------+----- + 1 | 5 | 160 | 10 | 20 + 21 | 3 | 135 | 15 | 30 +(2 rows) + +SELECT * FROM mat_m1 ORDER BY 1; + a | countb | sumbc | spreadcb | avgc +----+--------+-------+----------+------ + 1 | 5 | 160 | 10 | 20 + 21 | 3 | 135 | 15 | 30 +(2 rows) + +--verify that materialized only doesn't have rows with a> 20 +ALTER MATERIALIZED VIEW mat_m1 SET(timescaledb.materialized_only = true); +SELECT * FROM mat_m1 ORDER BY 1; + a | countb | sumbc | spreadcb | avgc +---+--------+-------+----------+------ + 1 | 5 | 160 | 10 | 20 +(1 row) + +--again revert the view to include real time aggregates +ALTER MATERIALIZED VIEW mat_m1 SET(timescaledb.materialized_only = false); +INSERT into ht_intdata values( 31 , 15 , 30); +INSERT into ht_intdata values( 31 , 14 , 70); +--cagg was not refreshed, should include all rows +SELECT * FROM mat_m1 ORDER BY 1; + a | countb | sumbc | spreadcb | avgc +----+--------+-------+----------+------ + 1 | 5 | 160 | 10 | 20 + 21 | 3 | 135 | 15 | 30 + 31 | 2 | 129 | 56 | 50 +(3 rows) + +CALL refresh_continuous_aggregate('mat_m1', NULL, NULL); +SELECT * FROM mat_m1 ORDER BY 1; + a | countb | sumbc | spreadcb | avgc +----+--------+-------+----------+------ + 1 | 5 | 160 | 10 | 20 + 21 | 3 | 135 | 15 | 30 + 31 | 2 | 129 | 56 | 50 +(3 rows) + +--the selects against mat_m1 before and after refresh should match this query +SELECT time_bucket(1, a), count(*), sum(b+c), max(c)-min(b), avg(c)::int +FROM ht_intdata +WHERE b < 16 +GROUP BY time_bucket(1, a) +HAVING sum(c) > 50 +ORDER BY 1; + time_bucket | count | sum | ?column? 
| avg +-------------+-------+-----+----------+----- + 1 | 5 | 160 | 10 | 20 + 21 | 3 | 135 | 15 | 30 + 31 | 2 | 129 | 56 | 50 +(3 rows) + +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_8_12_chunk +--- TEST union view with multiple WHERE and HAVING clauses +CREATE MATERIALIZED VIEW mat_m1 +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket(5, a), sum(b+c) +FROM ht_intdata +WHERE b < 16 and c > 20 +GROUP BY time_bucket(5, a) +HAVING sum(c) > 50 and avg(b)::int > 12 WITH NO DATA; +INSERT into ht_intdata values( 42 , 15 , 80); +INSERT into ht_intdata values( 42 , 15 , 18); +INSERT into ht_intdata values( 41 , 18 , 21); +-- Do partial refresh so that we leave data to UNION +CALL refresh_continuous_aggregate('mat_m1', NULL, 25); +SELECT mat_hypertable_id AS mat_m1_id +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'mat_m1' \gset +-- Show the new watermark +SELECT _timescaledb_functions.cagg_watermark(:mat_m1_id); + cagg_watermark +---------------- + 25 +(1 row) + +-- Data inserted after refresh and after cagg_watermark should be +-- reflected in the real-time aggregation +INSERT into ht_intdata VALUES (34, 13, 65), (26, 12, 78), (28, 9, 32); +--view and direct query should return same results +SELECT * from mat_m1 ORDER BY 1; + time_bucket | sum +-------------+----- + 20 | 135 + 30 | 207 + 40 | 95 +(3 rows) + +SELECT time_bucket(5, a), sum(b+c) +FROM ht_intdata +WHERE b < 16 and c > 20 +GROUP BY time_bucket(5, a) +HAVING sum(c) > 50 and avg(b)::int > 12 +ORDER by 1; + time_bucket | sum +-------------+----- + 20 | 135 + 30 | 207 + 40 | 95 +(3 rows) + +-- plan output +:PREFIX SELECT * FROM mat_m1 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=3 loops=1) + Sort Key: _materialized_hypertable_9.time_bucket + Sort Method: quicksort + -> Append (actual rows=3 loops=1) + -> Custom Scan (ChunkAppend) on _materialized_hypertable_9 (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Index Scan using _hyper_9_15_chunk__materialized_hypertable_9_time_bucket_idx on _hyper_9_15_chunk (actual rows=1 loops=1) + Index Cond: (time_bucket < COALESCE((_timescaledb_functions.cagg_watermark(9))::integer, '-2147483648'::integer)) + -> HashAggregate (actual rows=2 loops=1) + Group Key: time_bucket(5, ht_intdata.a) + Filter: ((sum(ht_intdata.c) > 50) AND ((avg(ht_intdata.b))::integer > 12)) + Batches: 1 + Rows Removed by Filter: 1 + -> Result (actual rows=6 loops=1) + -> Custom Scan (ChunkAppend) on ht_intdata (actual rows=6 loops=1) + Chunks excluded during startup: 1 + -> Index Scan Backward using _hyper_7_11_chunk_ht_intdata_a_idx on _hyper_7_11_chunk (actual rows=2 loops=1) + Index Cond: (a >= COALESCE((_timescaledb_functions.cagg_watermark(9))::integer, '-2147483648'::integer)) + Filter: ((b < 16) AND (c > 20)) + -> Index Scan Backward using _hyper_7_13_chunk_ht_intdata_a_idx on _hyper_7_13_chunk (actual rows=3 loops=1) + Index Cond: (a >= COALESCE((_timescaledb_functions.cagg_watermark(9))::integer, '-2147483648'::integer)) + Filter: ((b < 16) AND (c > 20)) + -> Index Scan Backward using _hyper_7_14_chunk_ht_intdata_a_idx on _hyper_7_14_chunk (actual rows=1 loops=1) + Index Cond: (a >= COALESCE((_timescaledb_functions.cagg_watermark(9))::integer, '-2147483648'::integer)) + Filter: ((b < 16) AND (c > 20)) + Rows Removed by Filter: 2 +(26 rows) + +-- Test caggs 
with different time types +CREATE TABLE smallint_table (time smallint, value int); +CREATE TABLE int_table (time int, value int); +CREATE TABLE bigint_table (time bigint, value int); +CREATE TABLE date_table (time date, value int); +CREATE TABLE timestamp_table (time timestamp, value int); +CREATE TABLE timestamptz_table (time timestamptz, value int); +SELECT create_hypertable('smallint_table', 'time', chunk_time_interval=>20); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------ + (10,public,smallint_table,t) +(1 row) + +SELECT create_hypertable('int_table', 'time', chunk_time_interval=>20); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (11,public,int_table,t) +(1 row) + +SELECT create_hypertable('bigint_table', 'time', chunk_time_interval=>20); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (12,public,bigint_table,t) +(1 row) + +SELECT create_hypertable('date_table', 'time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------- + (13,public,date_table,t) +(1 row) + +SELECT create_hypertable('timestamp_table', 'time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------- + (14,public,timestamp_table,t) +(1 row) + +SELECT create_hypertable('timestamptz_table', 'time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (15,public,timestamptz_table,t) +(1 row) + +CREATE OR REPLACE FUNCTION smallint_now() +RETURNS smallint LANGUAGE SQL STABLE AS +$$ + SELECT coalesce(max(time), 0)::smallint + FROM smallint_table +$$; +CREATE OR REPLACE FUNCTION int_now() +RETURNS int LANGUAGE SQL STABLE AS +$$ + SELECT coalesce(max(time), 0) + FROM int_table +$$; +CREATE OR REPLACE FUNCTION bigint_now() +RETURNS bigint LANGUAGE SQL STABLE AS +$$ + SELECT coalesce(max(time), 0)::bigint + FROM bigint_table +$$; +SELECT set_integer_now_func('smallint_table', 'smallint_now'); + set_integer_now_func +---------------------- + +(1 row) + +SELECT set_integer_now_func('int_table', 'int_now'); + set_integer_now_func +---------------------- + +(1 row) + +SELECT set_integer_now_func('bigint_table', 'bigint_now'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW smallint_agg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket(SMALLINT '10', time) AS bucket, avg(value) +FROM smallint_table +GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW int_agg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket(5, time) AS bucket, avg(value) +FROM int_table +GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW bigint_agg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket(BIGINT '10', time) AS bucket, avg(value) +FROM bigint_table +GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW date_agg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('2 days', time) AS bucket, avg(value) +FROM date_table +GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW timestamp_agg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('2 days', time) AS bucket, avg(value) +FROM timestamp_table +GROUP BY 1 WITH 
NO DATA; +CREATE MATERIALIZED VIEW timestamptz_agg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('2 days', time) AS bucket, avg(value) +FROM timestamptz_table +GROUP BY 1 WITH NO DATA; +-- Refresh first without data +CALL refresh_continuous_aggregate('int_agg', NULL, NULL); +NOTICE: continuous aggregate "int_agg" is already up-to-date +CALL refresh_continuous_aggregate('smallint_agg', NULL, NULL); +NOTICE: continuous aggregate "smallint_agg" is already up-to-date +CALL refresh_continuous_aggregate('bigint_agg', NULL, NULL); +NOTICE: continuous aggregate "bigint_agg" is already up-to-date +CALL refresh_continuous_aggregate('date_agg', NULL, NULL); +NOTICE: continuous aggregate "date_agg" is already up-to-date +CALL refresh_continuous_aggregate('timestamp_agg', NULL, NULL); +NOTICE: continuous aggregate "timestamp_agg" is already up-to-date +CALL refresh_continuous_aggregate('timestamptz_agg', NULL, NULL); +NOTICE: continuous aggregate "timestamptz_agg" is already up-to-date +-- Watermarks at min for the above caggs: +SELECT + user_view_name, + _timescaledb_functions.cagg_watermark(mat_hypertable_id), + _timescaledb_functions.cagg_watermark_materialized(mat_hypertable_id), + _timescaledb_functions.cagg_watermark(mat_hypertable_id) = _timescaledb_functions.cagg_watermark_materialized(mat_hypertable_id) AS is_equal +FROM _timescaledb_catalog.continuous_agg +NATURAL JOIN _timescaledb_catalog.continuous_aggs_watermark +ORDER BY 1, 2, 3; + user_view_name | cagg_watermark | cagg_watermark_materialized | is_equal +-----------------+----------------------+-----------------------------+---------- + bigint_agg | -9223372036854775808 | -9223372036854775808 | t + boundary_view | 30 | 30 | t + date_agg | -210866803200000000 | -210866803200000000 | t + int_agg | -2147483648 | -2147483648 | t + mat_m1 | 25 | 25 | t + metrics_summary | 946771200000000 | 946771200000000 | t + smallint_agg | -32768 | -32768 | t + timestamp_agg | -210866803200000000 | -210866803200000000 | t + timestamptz_agg | -210866803200000000 | -210866803200000000 | t +(9 rows) + +INSERT INTO smallint_table VALUES (1, 1), (2, 2), (6, 6), (8, 8), (19, 19); +INSERT INTO int_table VALUES (1, 1), (2, 2), (6, 6), (8, 8), (19, 19); +INSERT INTO bigint_table VALUES (1, 1), (2, 2), (6, 6), (8, 8), (19, 19); +INSERT INTO date_table VALUES ('2020-01-01', 1), ('2020-01-02', 2), ('2020-01-06', 6), ('2020-01-08', 8); +INSERT INTO timestamp_table VALUES ('2020-01-01', 1), ('2020-01-02', 2), ('2020-01-06', 6), ('2020-01-08', 8); +INSERT INTO timestamptz_table VALUES ('2020-01-01', 1), ('2020-01-02', 2), ('2020-01-06', 6), ('2020-01-08', 8); +-- Refresh to move the watermarks +CALL refresh_continuous_aggregate('int_agg', NULL, NULL); +CALL refresh_continuous_aggregate('smallint_agg', NULL, NULL); +CALL refresh_continuous_aggregate('bigint_agg', NULL, NULL); +CALL refresh_continuous_aggregate('date_agg', NULL, NULL); +CALL refresh_continuous_aggregate('timestamp_agg', NULL, NULL); +CALL refresh_continuous_aggregate('timestamptz_agg', NULL, NULL); +-- Watermarks should reflect the new materializations +SELECT user_view_name, _timescaledb_functions.cagg_watermark(mat_hypertable_id) +FROM _timescaledb_catalog.continuous_agg +ORDER BY 1,2; + user_view_name | cagg_watermark +-----------------+------------------ + bigint_agg | 20 + boundary_view | 30 + date_agg | 1578614400000000 + int_agg | 20 + mat_m1 | 25 + metrics_summary | 946771200000000 + smallint_agg | 20 + timestamp_agg | 1578614400000000 + timestamptz_agg 
| 1578614400000000 +(9 rows) + +-- Test overflow of valid ranges by inserting values close to the max +-- supported time values. Adding one bucket to these values overflow +-- the valid time ranges so the watermark should end up at the end of +-- valid range for the type. +INSERT INTO smallint_table VALUES (32765, 1); +INSERT INTO int_table VALUES (2147483645, 1); +INSERT INTO bigint_table VALUES (9223372036854775804, 1); +INSERT INTO date_table VALUES ('294247-01-01', 1); +INSERT INTO timestamp_table VALUES ('294247-01-01 00:00:00-00', 1); +INSERT INTO timestamptz_table VALUES ('294247-01-01 00:00:00-00', 1); +CALL refresh_continuous_aggregate('int_agg', NULL, NULL); +CALL refresh_continuous_aggregate('smallint_agg', NULL, NULL); +CALL refresh_continuous_aggregate('bigint_agg', NULL, NULL); +CALL refresh_continuous_aggregate('date_agg', NULL, NULL); +CALL refresh_continuous_aggregate('timestamp_agg', NULL, NULL); +CALL refresh_continuous_aggregate('timestamptz_agg', NULL, NULL); +-- Watermarks after refresh hould be at the end of the valid range for +-- the time type +SELECT user_view_name, _timescaledb_functions.cagg_watermark(mat_hypertable_id) +FROM _timescaledb_catalog.continuous_agg +ORDER BY 1,2; + user_view_name | cagg_watermark +-----------------+--------------------- + bigint_agg | 9223372036854775807 + boundary_view | 30 + date_agg | 9223372036854775807 + int_agg | 2147483647 + mat_m1 | 25 + metrics_summary | 946771200000000 + smallint_agg | 32767 + timestamp_agg | 9223372036854775807 + timestamptz_agg | 9223372036854775807 +(9 rows) + +-- Querying the aggs should work even when fully materialized +SELECT * FROM smallint_agg +ORDER BY 1,2; + bucket | avg +--------+------------------------ + 0 | 4.2500000000000000 + 10 | 19.0000000000000000 + 32760 | 1.00000000000000000000 +(3 rows) + +SELECT * FROM int_agg +ORDER BY 1,2; + bucket | avg +------------+------------------------ + 0 | 1.5000000000000000 + 5 | 7.0000000000000000 + 15 | 19.0000000000000000 + 2147483645 | 1.00000000000000000000 +(4 rows) + +SELECT * FROM bigint_agg +ORDER BY 1,2; + bucket | avg +---------------------+------------------------ + 0 | 4.2500000000000000 + 10 | 19.0000000000000000 + 9223372036854775800 | 1.00000000000000000000 +(3 rows) + +SELECT * FROM date_agg +ORDER BY 1,2; + bucket | avg +--------------+------------------------ + 12-31-2019 | 1.00000000000000000000 + 01-02-2020 | 2.0000000000000000 + 01-06-2020 | 6.0000000000000000 + 01-08-2020 | 8.0000000000000000 + 12-31-294246 | 1.00000000000000000000 +(5 rows) + +SELECT * FROM timestamp_agg +ORDER BY 1,2; + bucket | avg +----------------------------+------------------------ + Tue Dec 31 00:00:00 2019 | 1.00000000000000000000 + Thu Jan 02 00:00:00 2020 | 2.0000000000000000 + Mon Jan 06 00:00:00 2020 | 6.0000000000000000 + Wed Jan 08 00:00:00 2020 | 8.0000000000000000 + Thu Dec 31 00:00:00 294246 | 1.00000000000000000000 +(5 rows) + +SELECT * FROM timestamptz_agg +ORDER BY 1,2; + bucket | avg +--------------------------------+------------------------ + Mon Dec 30 16:00:00 2019 PST | 1.00000000000000000000 + Wed Jan 01 16:00:00 2020 PST | 2.0000000000000000 + Sun Jan 05 16:00:00 2020 PST | 6.0000000000000000 + Tue Jan 07 16:00:00 2020 PST | 8.0000000000000000 + Wed Dec 30 16:00:00 294246 PST | 1.00000000000000000000 +(5 rows) + +\set ON_ERROR_STOP 0 +------------------------------------- +-- Test invalid inputs for cagg_watermark +------------------------------------- +-- Non-existing materialized hypertable +SELECT 
_timescaledb_functions.cagg_watermark(100); +ERROR: invalid materialized hypertable ID: 100 +-- NULL hypertable ID. Function is STRICT, so does nothing: +SELECT _timescaledb_functions.cagg_watermark(NULL); + cagg_watermark +---------------- + +(1 row) + +\set ON_ERROR_STOP 1 +-- Remove stored watermark, so query and refresh should error +\c :TEST_DBNAME :ROLE_SUPERUSER +DELETE FROM _timescaledb_catalog.continuous_aggs_watermark; +\set ON_ERROR_STOP 0 +SELECT * FROM smallint_agg +ORDER BY 1,2; +ERROR: watermark not defined for continuous aggregate: 16 +SELECT * FROM int_agg +ORDER BY 1,2; +ERROR: watermark not defined for continuous aggregate: 17 +SELECT * FROM bigint_agg +ORDER BY 1,2; +ERROR: watermark not defined for continuous aggregate: 18 +SELECT * FROM date_agg +ORDER BY 1,2; +ERROR: watermark not defined for continuous aggregate: 19 +SELECT * FROM timestamp_agg +ORDER BY 1,2; +ERROR: watermark not defined for continuous aggregate: 20 +SELECT * FROM timestamptz_agg +ORDER BY 1,2; +ERROR: watermark not defined for continuous aggregate: 21 +CALL refresh_continuous_aggregate('int_agg', NULL, NULL); +ERROR: watermark not defined for continuous aggregate: 17 +CALL refresh_continuous_aggregate('smallint_agg', NULL, NULL); +ERROR: watermark not defined for continuous aggregate: 16 +CALL refresh_continuous_aggregate('bigint_agg', NULL, NULL); +ERROR: watermark not defined for continuous aggregate: 18 +CALL refresh_continuous_aggregate('date_agg', NULL, NULL); +NOTICE: continuous aggregate "date_agg" is already up-to-date +CALL refresh_continuous_aggregate('timestamp_agg', NULL, NULL); +NOTICE: continuous aggregate "timestamp_agg" is already up-to-date +CALL refresh_continuous_aggregate('timestamptz_agg', NULL, NULL); +NOTICE: continuous aggregate "timestamptz_agg" is already up-to-date +\set ON_ERROR_STOP 1 +-- Fix all continuous aggregates inserting materialized watermark into the metadata table +INSERT INTO _timescaledb_catalog.continuous_aggs_watermark (mat_hypertable_id, watermark) +SELECT a.mat_hypertable_id, _timescaledb_functions.cagg_watermark_materialized(a.mat_hypertable_id) +FROM _timescaledb_catalog.continuous_agg a +LEFT JOIN _timescaledb_catalog.continuous_aggs_watermark b ON b.mat_hypertable_id = a.mat_hypertable_id +WHERE b.mat_hypertable_id IS NULL +ORDER BY 1; +SELECT * FROM smallint_agg +ORDER BY 1,2; + bucket | avg +--------+------------------------ + 0 | 4.2500000000000000 + 10 | 19.0000000000000000 + 32760 | 1.00000000000000000000 +(3 rows) + +SELECT * FROM int_agg +ORDER BY 1,2; + bucket | avg +------------+------------------------ + 0 | 1.5000000000000000 + 5 | 7.0000000000000000 + 15 | 19.0000000000000000 + 2147483645 | 1.00000000000000000000 +(4 rows) + +SELECT * FROM bigint_agg +ORDER BY 1,2; + bucket | avg +---------------------+------------------------ + 0 | 4.2500000000000000 + 10 | 19.0000000000000000 + 9223372036854775800 | 1.00000000000000000000 +(3 rows) + +SELECT * FROM date_agg +ORDER BY 1,2; + bucket | avg +--------------+------------------------ + 12-31-2019 | 1.00000000000000000000 + 01-02-2020 | 2.0000000000000000 + 01-06-2020 | 6.0000000000000000 + 01-08-2020 | 8.0000000000000000 + 12-31-294246 | 1.00000000000000000000 +(5 rows) + +SELECT * FROM timestamp_agg +ORDER BY 1,2; + bucket | avg +----------------------------+------------------------ + Tue Dec 31 00:00:00 2019 | 1.00000000000000000000 + Thu Jan 02 00:00:00 2020 | 2.0000000000000000 + Mon Jan 06 00:00:00 2020 | 6.0000000000000000 + Wed Jan 08 00:00:00 2020 | 8.0000000000000000 + Thu Dec 31 
00:00:00 294246 | 1.00000000000000000000 +(5 rows) + +SELECT * FROM timestamptz_agg +ORDER BY 1,2; + bucket | avg +--------------------------------+------------------------ + Mon Dec 30 16:00:00 2019 PST | 1.00000000000000000000 + Wed Jan 01 16:00:00 2020 PST | 2.0000000000000000 + Sun Jan 05 16:00:00 2020 PST | 6.0000000000000000 + Tue Jan 07 16:00:00 2020 PST | 8.0000000000000000 + Wed Dec 30 16:00:00 294246 PST | 1.00000000000000000000 +(5 rows) + +CALL refresh_continuous_aggregate('int_agg', NULL, NULL); +CALL refresh_continuous_aggregate('smallint_agg', NULL, NULL); +CALL refresh_continuous_aggregate('bigint_agg', NULL, NULL); +CALL refresh_continuous_aggregate('date_agg', NULL, NULL); +NOTICE: continuous aggregate "date_agg" is already up-to-date +CALL refresh_continuous_aggregate('timestamp_agg', NULL, NULL); +NOTICE: continuous aggregate "timestamp_agg" is already up-to-date +CALL refresh_continuous_aggregate('timestamptz_agg', NULL, NULL); +NOTICE: continuous aggregate "timestamptz_agg" is already up-to-date diff --git a/tsl/test/expected/cagg_usage.out b/tsl/test/expected/cagg_usage-13.out similarity index 100% rename from tsl/test/expected/cagg_usage.out rename to tsl/test/expected/cagg_usage-13.out diff --git a/tsl/test/expected/cagg_usage-14.out b/tsl/test/expected/cagg_usage-14.out new file mode 100644 index 00000000000..4d67ff9a2ef --- /dev/null +++ b/tsl/test/expected/cagg_usage-14.out @@ -0,0 +1,470 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- TEST SETUP -- +\set ON_ERROR_STOP 0 +SET client_min_messages TO NOTICE; +SET work_mem TO '64MB'; +-- START OF USAGE TEST -- +--First create your hypertable +CREATE TABLE device_readings ( + observation_time TIMESTAMPTZ NOT NULL, + device_id TEXT NOT NULL, + metric DOUBLE PRECISION NOT NULL, + PRIMARY KEY(observation_time, device_id) +); +SELECT table_name FROM create_hypertable('device_readings', 'observation_time'); + table_name +----------------- + device_readings +(1 row) + +--Next, create your continuous aggregate view +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) --This flag is what makes the view continuous +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, --time_bucket is required + device_id, + avg(metric) as metric_avg, --We can use regular aggregates + max(metric)-min(metric) as metric_spread --We can also use expressions on aggregates and constants +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; --We have to group by the bucket column, but can also add other group-by columns +SELECT add_continuous_aggregate_policy('device_summary', NULL, '2 h'::interval, '2 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +--Next, insert some data into the raw hypertable +INSERT INTO device_readings +SELECT ts, 'device_1', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +INSERT INTO device_readings +SELECT ts, 'device_2', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +--Initially, it will be empty. 
+SELECT * FROM device_summary; + bucket | device_id | metric_avg | metric_spread +--------+-----------+------------+--------------- +(0 rows) + +-- Simulate a policy that refreshes with lag, i.e., it doesn't refresh +-- the entire data set. In this case up to the given date. +CALL refresh_continuous_aggregate('device_summary', NULL, '2018-12-30 22:00'); +--Now you can run selects over your view as normal +SELECT * FROM device_summary WHERE metric_spread = 1800 ORDER BY bucket DESC, device_id LIMIT 10; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 21:00:00 2018 PST | device_1 | 1546204500 | 1800 + Sun Dec 30 21:00:00 2018 PST | device_2 | 1546204500 | 1800 + Sun Dec 30 20:00:00 2018 PST | device_1 | 1546200900 | 1800 + Sun Dec 30 20:00:00 2018 PST | device_2 | 1546200900 | 1800 + Sun Dec 30 19:00:00 2018 PST | device_1 | 1546197300 | 1800 + Sun Dec 30 19:00:00 2018 PST | device_2 | 1546197300 | 1800 + Sun Dec 30 18:00:00 2018 PST | device_1 | 1546193700 | 1800 + Sun Dec 30 18:00:00 2018 PST | device_2 | 1546193700 | 1800 + Sun Dec 30 17:00:00 2018 PST | device_1 | 1546190100 | 1800 + Sun Dec 30 17:00:00 2018 PST | device_2 | 1546190100 | 1800 +(10 rows) + +--You can view informaton about your continuous aggregates. The meaning of these fields will be explained further down. +\x +SELECT * FROM timescaledb_information.continuous_aggregates; +-[ RECORD 1 ]---------------------+------------------------------------------------------------------------------------------------------------- +hypertable_schema | public +hypertable_name | device_readings +view_schema | public +view_name | device_summary +view_owner | default_perm_user +materialized_only | t +compression_enabled | f +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_2 +view_definition | SELECT time_bucket('@ 1 hour'::interval, device_readings.observation_time) AS bucket, + + | device_readings.device_id, + + | avg(device_readings.metric) AS metric_avg, + + | (max(device_readings.metric) - min(device_readings.metric)) AS metric_spread + + | FROM device_readings + + | GROUP BY (time_bucket('@ 1 hour'::interval, device_readings.observation_time)), device_readings.device_id; +finalized | t + +\x +-- Refresh interval +-- +-- The refresh interval determines how often the background worker +-- for automatic materialization will run. 
The default is (2 x bucket_width) +SELECT schedule_interval FROM _timescaledb_config.bgw_job WHERE id = 1000; + schedule_interval +------------------- + @ 2 hours +(1 row) + +-- You can change this setting with ALTER VIEW (equivalently, specify in WITH clause of CREATE VIEW) +SELECT alter_job(1000, schedule_interval := '1h'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 hour","@ 0",-1,"@ 2 hours",t,"{""end_offset"": ""@ 2 hours"", ""start_offset"": null, ""mat_hypertable_id"": 2}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job WHERE id = 1000; + schedule_interval +------------------- + @ 1 hour +(1 row) + +-- +-- Refresh with lag +-- +-- It is possible to use a policy or manual refresh with a lag, which +-- means the materialization will not contain the most up-to-date +-- data. +SELECT max(observation_time) FROM device_readings; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +SELECT max(bucket) FROM device_summary; + max +------------------------------ + Sun Dec 30 21:00:00 2018 PST +(1 row) + +CALL refresh_continuous_aggregate('device_summary', NULL, '2018-12-31 01:00'); +SELECT max(observation_time) FROM device_readings; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +SELECT max(bucket) FROM device_summary; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +-- +-- Invalidations +-- +--Changes to the raw table, for values that have already been materialized are propagated asynchronously, after the materialization next runs. +--Before update: +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1546175700 | 1800 +(1 row) + +INSERT INTO device_readings VALUES ('Sun Dec 30 13:01:00 2018 PST', 'device_1', 1.0); +--Change not reflected before materializer runs. +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1546175700 | 1800 +(1 row) + +CALL refresh_continuous_aggregate('device_summary', NULL, NULL); +--But is reflected after. +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1030783800.33333 | 1546176599 +(1 row) + +-- +-- Dealing with timezones +-- +-- You cannot use any functions that depend on the local timezone setting inside a continuous aggregate. +-- For example you cannot cast to the local time. This is because +-- a timezone setting can alter from user-to-user and thus +-- cannot be materialized. 
+DROP MATERIALIZED VIEW device_summary; +NOTICE: drop cascades to table _timescaledb_internal._hyper_2_6_chunk +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time::timestamp) as min_time, --note the cast to localtime + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +--note the error. +-- You have two options: +-- Option 1: be explicit in your timezone: +DROP MATERIALIZED VIEW device_summary; +ERROR: materialized view "device_summary" does not exist +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time AT TIME ZONE 'EST') as min_time, --note the explict timezone + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; +DROP MATERIALIZED VIEW device_summary; +-- Option 2: Keep things as TIMESTAMPTZ in the view and convert to local time when +-- querying from the view +DROP MATERIALIZED VIEW device_summary; +ERROR: materialized view "device_summary" does not exist +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time) as min_time, --this is a TIMESTAMPTZ + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH DATA; +NOTICE: refreshing continuous aggregate "device_summary" +SELECT min(min_time)::timestamp FROM device_summary; + min +-------------------------- + Sat Dec 01 00:00:00 2018 +(1 row) + +-- +-- test just in time aggregate / materialization only view +-- +-- hardcoding now to 50 will lead to 30 watermark +CREATE OR REPLACE FUNCTION device_readings_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +CREATE TABLE device_readings_int(time int, value float); +SELECT create_hypertable('device_readings_int','time',chunk_time_interval:=10); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (5,public,device_readings_int,t) +(1 row) + +SELECT set_integer_now_func('device_readings_int','device_readings_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW device_readings_mat_only + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS + SELECT time_bucket(10,time), avg(value) FROM device_readings_int GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW device_readings_jit + WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT time_bucket(10,time), avg(value) FROM device_readings_int GROUP BY 1 WITH NO DATA; +INSERT INTO device_readings_int SELECT i, i*10 FROM generate_series(10,40,10) AS g(i); +-- materialization only should have 0 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- +(0 rows) + +-- jit aggregate should have 4 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +-- simulate a refresh 
policy with lag, i.e., one that doesn't refresh +-- up to the latest data. Max value is 40. +CALL refresh_continuous_aggregate('device_readings_mat_only', NULL, 30); +CALL refresh_continuous_aggregate('device_readings_jit', NULL, 30); +-- materialization only should have 2 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 +(2 rows) + +-- jit aggregate should have 4 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +-- add 2 more rows +INSERT INTO device_readings_int SELECT i, i*10 FROM generate_series(50,60,10) AS g(i); +-- materialization only should have 2 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 +(2 rows) + +-- jit aggregate should have 6 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- hardcoding now to 100 will lead to 80 watermark +CREATE OR REPLACE FUNCTION device_readings_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 100; +$BODY$; +-- refresh should materialize all now +CALL refresh_continuous_aggregate('device_readings_mat_only', NULL, NULL); +CALL refresh_continuous_aggregate('device_readings_jit', NULL, NULL); +-- materialization only should have 6 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- jit aggregate should have 6 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- START OF BASIC USAGE TESTS -- +-- Check that continuous aggregate and materialized table is dropped +-- together. +CREATE TABLE whatever(time TIMESTAMPTZ NOT NULL, metric INTEGER); +SELECT * FROM create_hypertable('whatever', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 8 | public | whatever | t +(1 row) + +CREATE MATERIALIZED VIEW whatever_summary WITH (timescaledb.continuous) AS +SELECT time_bucket('1 hour', time) AS bucket, avg(metric) + FROM whatever GROUP BY bucket WITH NO DATA; +SELECT (SELECT format('%1$I.%2$I', schema_name, table_name)::regclass::oid + FROM _timescaledb_catalog.hypertable + WHERE id = raw_hypertable_id) AS raw_table + , (SELECT format('%1$I.%2$I', schema_name, table_name)::regclass::oid + FROM _timescaledb_catalog.hypertable + WHERE id = mat_hypertable_id) AS mat_table +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'whatever_summary' \gset +SELECT relname FROM pg_class WHERE oid = :mat_table; + relname +---------------------------- + _materialized_hypertable_9 +(1 row) + +---------------------------------------------------------------- +-- Should generate an error since the cagg is dependent on the table. +DROP TABLE whatever; +ERROR: cannot drop table whatever because other objects depend on it +---------------------------------------------------------------- +-- Checking that a cagg cannot be dropped if there is a dependent +-- object on it. 
+CREATE VIEW whatever_summary_dependency AS SELECT * FROM whatever_summary; +-- Should generate an error +DROP MATERIALIZED VIEW whatever_summary; +ERROR: cannot drop view whatever_summary because other objects depend on it +-- Dropping the dependent view so that we can do a proper drop below. +DROP VIEW whatever_summary_dependency; +---------------------------------------------------------------- +-- Dropping the cagg should also remove the materialized table +DROP MATERIALIZED VIEW whatever_summary; +SELECT relname FROM pg_class WHERE oid = :mat_table; + relname +--------- +(0 rows) + +---------------------------------------------------------------- +-- Cleanup +DROP TABLE whatever; +-- Check that continuous_agg_invalidation_trigger() handles no arguments properly +SELECT _timescaledb_functions.continuous_agg_invalidation_trigger(); +ERROR: must supply hypertable id +-- END OF BASIC USAGE TESTS -- +CREATE TABLE metrics(time timestamptz, device TEXT, value float); +SELECT table_name FROM create_hypertable('metrics','time'); +NOTICE: adding not-null constraint to column "time" + table_name +------------ + metrics +(1 row) + +INSERT INTO metrics SELECT generate_series('1999-12-20'::timestamptz,'2000-02-01'::timestamptz,'12 day'::interval), 'dev1', 0.25; +SELECT current_setting('timezone'); + current_setting +----------------- + PST8PDT +(1 row) + +-- should be blocked because non-immutable expression +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, current_setting('timezone')) FROM metrics GROUP BY 1; +ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, 'PST8PDT') FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg1" +SELECT * FROM cagg1; + time_bucket +------------------------------ + Mon Dec 20 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST +(4 rows) + +CREATE MATERIALIZED VIEW cagg2 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT') FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg2" +SELECT * FROM cagg2; + time_bucket +------------------------------ + Wed Dec 01 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST +(2 rows) + +-- custom origin +CREATE MATERIALIZED VIEW cagg3 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', '2000-01-01'::timestamptz) FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg3" +SELECT * FROM cagg3; + time_bucket +------------------------------ + Wed Dec 01 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST +(2 rows) + +-- offset not supported atm +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; +ERROR: continuous aggregate view must include a valid time bucket function +\set ON_ERROR_STOP 1 diff --git a/tsl/test/expected/cagg_usage-15.out b/tsl/test/expected/cagg_usage-15.out new file mode 100644 index 00000000000..4d67ff9a2ef --- /dev/null +++ b/tsl/test/expected/cagg_usage-15.out @@ -0,0 +1,470 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- TEST SETUP -- +\set ON_ERROR_STOP 0 +SET client_min_messages TO NOTICE; +SET work_mem TO '64MB'; +-- START OF USAGE TEST -- +--First create your hypertable +CREATE TABLE device_readings ( + observation_time TIMESTAMPTZ NOT NULL, + device_id TEXT NOT NULL, + metric DOUBLE PRECISION NOT NULL, + PRIMARY KEY(observation_time, device_id) +); +SELECT table_name FROM create_hypertable('device_readings', 'observation_time'); + table_name +----------------- + device_readings +(1 row) + +--Next, create your continuous aggregate view +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) --This flag is what makes the view continuous +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, --time_bucket is required + device_id, + avg(metric) as metric_avg, --We can use regular aggregates + max(metric)-min(metric) as metric_spread --We can also use expressions on aggregates and constants +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; --We have to group by the bucket column, but can also add other group-by columns +SELECT add_continuous_aggregate_policy('device_summary', NULL, '2 h'::interval, '2 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +--Next, insert some data into the raw hypertable +INSERT INTO device_readings +SELECT ts, 'device_1', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +INSERT INTO device_readings +SELECT ts, 'device_2', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +--Initially, it will be empty. +SELECT * FROM device_summary; + bucket | device_id | metric_avg | metric_spread +--------+-----------+------------+--------------- +(0 rows) + +-- Simulate a policy that refreshes with lag, i.e., it doesn't refresh +-- the entire data set. In this case up to the given date. +CALL refresh_continuous_aggregate('device_summary', NULL, '2018-12-30 22:00'); +--Now you can run selects over your view as normal +SELECT * FROM device_summary WHERE metric_spread = 1800 ORDER BY bucket DESC, device_id LIMIT 10; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 21:00:00 2018 PST | device_1 | 1546204500 | 1800 + Sun Dec 30 21:00:00 2018 PST | device_2 | 1546204500 | 1800 + Sun Dec 30 20:00:00 2018 PST | device_1 | 1546200900 | 1800 + Sun Dec 30 20:00:00 2018 PST | device_2 | 1546200900 | 1800 + Sun Dec 30 19:00:00 2018 PST | device_1 | 1546197300 | 1800 + Sun Dec 30 19:00:00 2018 PST | device_2 | 1546197300 | 1800 + Sun Dec 30 18:00:00 2018 PST | device_1 | 1546193700 | 1800 + Sun Dec 30 18:00:00 2018 PST | device_2 | 1546193700 | 1800 + Sun Dec 30 17:00:00 2018 PST | device_1 | 1546190100 | 1800 + Sun Dec 30 17:00:00 2018 PST | device_2 | 1546190100 | 1800 +(10 rows) + +--You can view informaton about your continuous aggregates. The meaning of these fields will be explained further down. 
+\x +SELECT * FROM timescaledb_information.continuous_aggregates; +-[ RECORD 1 ]---------------------+------------------------------------------------------------------------------------------------------------- +hypertable_schema | public +hypertable_name | device_readings +view_schema | public +view_name | device_summary +view_owner | default_perm_user +materialized_only | t +compression_enabled | f +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_2 +view_definition | SELECT time_bucket('@ 1 hour'::interval, device_readings.observation_time) AS bucket, + + | device_readings.device_id, + + | avg(device_readings.metric) AS metric_avg, + + | (max(device_readings.metric) - min(device_readings.metric)) AS metric_spread + + | FROM device_readings + + | GROUP BY (time_bucket('@ 1 hour'::interval, device_readings.observation_time)), device_readings.device_id; +finalized | t + +\x +-- Refresh interval +-- +-- The refresh interval determines how often the background worker +-- for automatic materialization will run. The default is (2 x bucket_width) +SELECT schedule_interval FROM _timescaledb_config.bgw_job WHERE id = 1000; + schedule_interval +------------------- + @ 2 hours +(1 row) + +-- You can change this setting with ALTER VIEW (equivalently, specify in WITH clause of CREATE VIEW) +SELECT alter_job(1000, schedule_interval := '1h'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 hour","@ 0",-1,"@ 2 hours",t,"{""end_offset"": ""@ 2 hours"", ""start_offset"": null, ""mat_hypertable_id"": 2}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job WHERE id = 1000; + schedule_interval +------------------- + @ 1 hour +(1 row) + +-- +-- Refresh with lag +-- +-- It is possible to use a policy or manual refresh with a lag, which +-- means the materialization will not contain the most up-to-date +-- data. +SELECT max(observation_time) FROM device_readings; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +SELECT max(bucket) FROM device_summary; + max +------------------------------ + Sun Dec 30 21:00:00 2018 PST +(1 row) + +CALL refresh_continuous_aggregate('device_summary', NULL, '2018-12-31 01:00'); +SELECT max(observation_time) FROM device_readings; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +SELECT max(bucket) FROM device_summary; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +-- +-- Invalidations +-- +--Changes to the raw table, for values that have already been materialized are propagated asynchronously, after the materialization next runs. +--Before update: +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1546175700 | 1800 +(1 row) + +INSERT INTO device_readings VALUES ('Sun Dec 30 13:01:00 2018 PST', 'device_1', 1.0); +--Change not reflected before materializer runs. 
+SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1546175700 | 1800 +(1 row) + +CALL refresh_continuous_aggregate('device_summary', NULL, NULL); +--But is reflected after. +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1030783800.33333 | 1546176599 +(1 row) + +-- +-- Dealing with timezones +-- +-- You cannot use any functions that depend on the local timezone setting inside a continuous aggregate. +-- For example you cannot cast to the local time. This is because +-- a timezone setting can alter from user-to-user and thus +-- cannot be materialized. +DROP MATERIALIZED VIEW device_summary; +NOTICE: drop cascades to table _timescaledb_internal._hyper_2_6_chunk +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time::timestamp) as min_time, --note the cast to localtime + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +--note the error. +-- You have two options: +-- Option 1: be explicit in your timezone: +DROP MATERIALIZED VIEW device_summary; +ERROR: materialized view "device_summary" does not exist +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time AT TIME ZONE 'EST') as min_time, --note the explict timezone + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; +DROP MATERIALIZED VIEW device_summary; +-- Option 2: Keep things as TIMESTAMPTZ in the view and convert to local time when +-- querying from the view +DROP MATERIALIZED VIEW device_summary; +ERROR: materialized view "device_summary" does not exist +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time) as min_time, --this is a TIMESTAMPTZ + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH DATA; +NOTICE: refreshing continuous aggregate "device_summary" +SELECT min(min_time)::timestamp FROM device_summary; + min +-------------------------- + Sat Dec 01 00:00:00 2018 +(1 row) + +-- +-- test just in time aggregate / materialization only view +-- +-- hardcoding now to 50 will lead to 30 watermark +CREATE OR REPLACE FUNCTION device_readings_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +CREATE TABLE device_readings_int(time int, value float); +SELECT create_hypertable('device_readings_int','time',chunk_time_interval:=10); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (5,public,device_readings_int,t) +(1 row) + +SELECT 
set_integer_now_func('device_readings_int','device_readings_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW device_readings_mat_only + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS + SELECT time_bucket(10,time), avg(value) FROM device_readings_int GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW device_readings_jit + WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT time_bucket(10,time), avg(value) FROM device_readings_int GROUP BY 1 WITH NO DATA; +INSERT INTO device_readings_int SELECT i, i*10 FROM generate_series(10,40,10) AS g(i); +-- materialization only should have 0 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- +(0 rows) + +-- jit aggregate should have 4 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +-- simulate a refresh policy with lag, i.e., one that doesn't refresh +-- up to the latest data. Max value is 40. +CALL refresh_continuous_aggregate('device_readings_mat_only', NULL, 30); +CALL refresh_continuous_aggregate('device_readings_jit', NULL, 30); +-- materialization only should have 2 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 +(2 rows) + +-- jit aggregate should have 4 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +-- add 2 more rows +INSERT INTO device_readings_int SELECT i, i*10 FROM generate_series(50,60,10) AS g(i); +-- materialization only should have 2 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 +(2 rows) + +-- jit aggregate should have 6 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- hardcoding now to 100 will lead to 80 watermark +CREATE OR REPLACE FUNCTION device_readings_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 100; +$BODY$; +-- refresh should materialize all now +CALL refresh_continuous_aggregate('device_readings_mat_only', NULL, NULL); +CALL refresh_continuous_aggregate('device_readings_jit', NULL, NULL); +-- materialization only should have 6 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- jit aggregate should have 6 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- START OF BASIC USAGE TESTS -- +-- Check that continuous aggregate and materialized table is dropped +-- together. 
+CREATE TABLE whatever(time TIMESTAMPTZ NOT NULL, metric INTEGER); +SELECT * FROM create_hypertable('whatever', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 8 | public | whatever | t +(1 row) + +CREATE MATERIALIZED VIEW whatever_summary WITH (timescaledb.continuous) AS +SELECT time_bucket('1 hour', time) AS bucket, avg(metric) + FROM whatever GROUP BY bucket WITH NO DATA; +SELECT (SELECT format('%1$I.%2$I', schema_name, table_name)::regclass::oid + FROM _timescaledb_catalog.hypertable + WHERE id = raw_hypertable_id) AS raw_table + , (SELECT format('%1$I.%2$I', schema_name, table_name)::regclass::oid + FROM _timescaledb_catalog.hypertable + WHERE id = mat_hypertable_id) AS mat_table +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'whatever_summary' \gset +SELECT relname FROM pg_class WHERE oid = :mat_table; + relname +---------------------------- + _materialized_hypertable_9 +(1 row) + +---------------------------------------------------------------- +-- Should generate an error since the cagg is dependent on the table. +DROP TABLE whatever; +ERROR: cannot drop table whatever because other objects depend on it +---------------------------------------------------------------- +-- Checking that a cagg cannot be dropped if there is a dependent +-- object on it. +CREATE VIEW whatever_summary_dependency AS SELECT * FROM whatever_summary; +-- Should generate an error +DROP MATERIALIZED VIEW whatever_summary; +ERROR: cannot drop view whatever_summary because other objects depend on it +-- Dropping the dependent view so that we can do a proper drop below. +DROP VIEW whatever_summary_dependency; +---------------------------------------------------------------- +-- Dropping the cagg should also remove the materialized table +DROP MATERIALIZED VIEW whatever_summary; +SELECT relname FROM pg_class WHERE oid = :mat_table; + relname +--------- +(0 rows) + +---------------------------------------------------------------- +-- Cleanup +DROP TABLE whatever; +-- Check that continuous_agg_invalidation_trigger() handles no arguments properly +SELECT _timescaledb_functions.continuous_agg_invalidation_trigger(); +ERROR: must supply hypertable id +-- END OF BASIC USAGE TESTS -- +CREATE TABLE metrics(time timestamptz, device TEXT, value float); +SELECT table_name FROM create_hypertable('metrics','time'); +NOTICE: adding not-null constraint to column "time" + table_name +------------ + metrics +(1 row) + +INSERT INTO metrics SELECT generate_series('1999-12-20'::timestamptz,'2000-02-01'::timestamptz,'12 day'::interval), 'dev1', 0.25; +SELECT current_setting('timezone'); + current_setting +----------------- + PST8PDT +(1 row) + +-- should be blocked because non-immutable expression +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, current_setting('timezone')) FROM metrics GROUP BY 1; +ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, 'PST8PDT') FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg1" +SELECT * FROM cagg1; + time_bucket +------------------------------ + Mon Dec 20 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST +(4 rows) + +CREATE MATERIALIZED VIEW cagg2 WITH 
(timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT') FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg2" +SELECT * FROM cagg2; + time_bucket +------------------------------ + Wed Dec 01 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST +(2 rows) + +-- custom origin +CREATE MATERIALIZED VIEW cagg3 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', '2000-01-01'::timestamptz) FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg3" +SELECT * FROM cagg3; + time_bucket +------------------------------ + Wed Dec 01 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST +(2 rows) + +-- offset not supported atm +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; +ERROR: continuous aggregate view must include a valid time bucket function +\set ON_ERROR_STOP 1 diff --git a/tsl/test/expected/cagg_usage-16.out b/tsl/test/expected/cagg_usage-16.out new file mode 100644 index 00000000000..ad626d0bfc3 --- /dev/null +++ b/tsl/test/expected/cagg_usage-16.out @@ -0,0 +1,470 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- TEST SETUP -- +\set ON_ERROR_STOP 0 +SET client_min_messages TO NOTICE; +SET work_mem TO '64MB'; +-- START OF USAGE TEST -- +--First create your hypertable +CREATE TABLE device_readings ( + observation_time TIMESTAMPTZ NOT NULL, + device_id TEXT NOT NULL, + metric DOUBLE PRECISION NOT NULL, + PRIMARY KEY(observation_time, device_id) +); +SELECT table_name FROM create_hypertable('device_readings', 'observation_time'); + table_name +----------------- + device_readings +(1 row) + +--Next, create your continuous aggregate view +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) --This flag is what makes the view continuous +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, --time_bucket is required + device_id, + avg(metric) as metric_avg, --We can use regular aggregates + max(metric)-min(metric) as metric_spread --We can also use expressions on aggregates and constants +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; --We have to group by the bucket column, but can also add other group-by columns +SELECT add_continuous_aggregate_policy('device_summary', NULL, '2 h'::interval, '2 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +--Next, insert some data into the raw hypertable +INSERT INTO device_readings +SELECT ts, 'device_1', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +INSERT INTO device_readings +SELECT ts, 'device_2', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +--Initially, it will be empty. +SELECT * FROM device_summary; + bucket | device_id | metric_avg | metric_spread +--------+-----------+------------+--------------- +(0 rows) + +-- Simulate a policy that refreshes with lag, i.e., it doesn't refresh +-- the entire data set. In this case up to the given date. 
+CALL refresh_continuous_aggregate('device_summary', NULL, '2018-12-30 22:00'); +--Now you can run selects over your view as normal +SELECT * FROM device_summary WHERE metric_spread = 1800 ORDER BY bucket DESC, device_id LIMIT 10; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 21:00:00 2018 PST | device_1 | 1546204500 | 1800 + Sun Dec 30 21:00:00 2018 PST | device_2 | 1546204500 | 1800 + Sun Dec 30 20:00:00 2018 PST | device_1 | 1546200900 | 1800 + Sun Dec 30 20:00:00 2018 PST | device_2 | 1546200900 | 1800 + Sun Dec 30 19:00:00 2018 PST | device_1 | 1546197300 | 1800 + Sun Dec 30 19:00:00 2018 PST | device_2 | 1546197300 | 1800 + Sun Dec 30 18:00:00 2018 PST | device_1 | 1546193700 | 1800 + Sun Dec 30 18:00:00 2018 PST | device_2 | 1546193700 | 1800 + Sun Dec 30 17:00:00 2018 PST | device_1 | 1546190100 | 1800 + Sun Dec 30 17:00:00 2018 PST | device_2 | 1546190100 | 1800 +(10 rows) + +--You can view informaton about your continuous aggregates. The meaning of these fields will be explained further down. +\x +SELECT * FROM timescaledb_information.continuous_aggregates; +-[ RECORD 1 ]---------------------+----------------------------------------------------------------------------- +hypertable_schema | public +hypertable_name | device_readings +view_schema | public +view_name | device_summary +view_owner | default_perm_user +materialized_only | t +compression_enabled | f +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_2 +view_definition | SELECT time_bucket('@ 1 hour'::interval, observation_time) AS bucket, + + | device_id, + + | avg(metric) AS metric_avg, + + | (max(metric) - min(metric)) AS metric_spread + + | FROM device_readings + + | GROUP BY (time_bucket('@ 1 hour'::interval, observation_time)), device_id; +finalized | t + +\x +-- Refresh interval +-- +-- The refresh interval determines how often the background worker +-- for automatic materialization will run. The default is (2 x bucket_width) +SELECT schedule_interval FROM _timescaledb_config.bgw_job WHERE id = 1000; + schedule_interval +------------------- + @ 2 hours +(1 row) + +-- You can change this setting with ALTER VIEW (equivalently, specify in WITH clause of CREATE VIEW) +SELECT alter_job(1000, schedule_interval := '1h'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 hour","@ 0",-1,"@ 2 hours",t,"{""end_offset"": ""@ 2 hours"", ""start_offset"": null, ""mat_hypertable_id"": 2}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job WHERE id = 1000; + schedule_interval +------------------- + @ 1 hour +(1 row) + +-- +-- Refresh with lag +-- +-- It is possible to use a policy or manual refresh with a lag, which +-- means the materialization will not contain the most up-to-date +-- data. 
+SELECT max(observation_time) FROM device_readings; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +SELECT max(bucket) FROM device_summary; + max +------------------------------ + Sun Dec 30 21:00:00 2018 PST +(1 row) + +CALL refresh_continuous_aggregate('device_summary', NULL, '2018-12-31 01:00'); +SELECT max(observation_time) FROM device_readings; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +SELECT max(bucket) FROM device_summary; + max +------------------------------ + Mon Dec 31 00:00:00 2018 PST +(1 row) + +-- +-- Invalidations +-- +--Changes to the raw table, for values that have already been materialized are propagated asynchronously, after the materialization next runs. +--Before update: +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1546175700 | 1800 +(1 row) + +INSERT INTO device_readings VALUES ('Sun Dec 30 13:01:00 2018 PST', 'device_1', 1.0); +--Change not reflected before materializer runs. +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1546175700 | 1800 +(1 row) + +CALL refresh_continuous_aggregate('device_summary', NULL, NULL); +--But is reflected after. +SELECT * FROM device_summary WHERE device_id = 'device_1' and bucket = 'Sun Dec 30 13:00:00 2018 PST'; + bucket | device_id | metric_avg | metric_spread +------------------------------+-----------+------------------+--------------- + Sun Dec 30 13:00:00 2018 PST | device_1 | 1030783800.33333 | 1546176599 +(1 row) + +-- +-- Dealing with timezones +-- +-- You cannot use any functions that depend on the local timezone setting inside a continuous aggregate. +-- For example you cannot cast to the local time. This is because +-- a timezone setting can alter from user-to-user and thus +-- cannot be materialized. +DROP MATERIALIZED VIEW device_summary; +NOTICE: drop cascades to table _timescaledb_internal._hyper_2_6_chunk +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time::timestamp) as min_time, --note the cast to localtime + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; +ERROR: only immutable functions supported in continuous aggregate view +--note the error. 
+-- You have two options: +-- Option 1: be explicit in your timezone: +DROP MATERIALIZED VIEW device_summary; +ERROR: materialized view "device_summary" does not exist +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time AT TIME ZONE 'EST') as min_time, --note the explict timezone + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH NO DATA; +DROP MATERIALIZED VIEW device_summary; +-- Option 2: Keep things as TIMESTAMPTZ in the view and convert to local time when +-- querying from the view +DROP MATERIALIZED VIEW device_summary; +ERROR: materialized view "device_summary" does not exist +CREATE MATERIALIZED VIEW device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + min(observation_time) as min_time, --this is a TIMESTAMPTZ + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + device_readings +GROUP BY bucket, device_id WITH DATA; +NOTICE: refreshing continuous aggregate "device_summary" +SELECT min(min_time)::timestamp FROM device_summary; + min +-------------------------- + Sat Dec 01 00:00:00 2018 +(1 row) + +-- +-- test just in time aggregate / materialization only view +-- +-- hardcoding now to 50 will lead to 30 watermark +CREATE OR REPLACE FUNCTION device_readings_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 50; +$BODY$; +CREATE TABLE device_readings_int(time int, value float); +SELECT create_hypertable('device_readings_int','time',chunk_time_interval:=10); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (5,public,device_readings_int,t) +(1 row) + +SELECT set_integer_now_func('device_readings_int','device_readings_int_now'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW device_readings_mat_only + WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS + SELECT time_bucket(10,time), avg(value) FROM device_readings_int GROUP BY 1 WITH NO DATA; +CREATE MATERIALIZED VIEW device_readings_jit + WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT time_bucket(10,time), avg(value) FROM device_readings_int GROUP BY 1 WITH NO DATA; +INSERT INTO device_readings_int SELECT i, i*10 FROM generate_series(10,40,10) AS g(i); +-- materialization only should have 0 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- +(0 rows) + +-- jit aggregate should have 4 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +-- simulate a refresh policy with lag, i.e., one that doesn't refresh +-- up to the latest data. Max value is 40. 
+CALL refresh_continuous_aggregate('device_readings_mat_only', NULL, 30); +CALL refresh_continuous_aggregate('device_readings_jit', NULL, 30); +-- materialization only should have 2 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 +(2 rows) + +-- jit aggregate should have 4 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 +(4 rows) + +-- add 2 more rows +INSERT INTO device_readings_int SELECT i, i*10 FROM generate_series(50,60,10) AS g(i); +-- materialization only should have 2 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 +(2 rows) + +-- jit aggregate should have 6 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- hardcoding now to 100 will lead to 80 watermark +CREATE OR REPLACE FUNCTION device_readings_int_now() + RETURNS INT LANGUAGE SQL STABLE AS +$BODY$ + SELECT 100; +$BODY$; +-- refresh should materialize all now +CALL refresh_continuous_aggregate('device_readings_mat_only', NULL, NULL); +CALL refresh_continuous_aggregate('device_readings_jit', NULL, NULL); +-- materialization only should have 6 rows +SELECT * FROM device_readings_mat_only ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- jit aggregate should have 6 rows +SELECT * FROM device_readings_jit ORDER BY time_bucket; + time_bucket | avg +-------------+----- + 10 | 100 + 20 | 200 + 30 | 300 + 40 | 400 + 50 | 500 + 60 | 600 +(6 rows) + +-- START OF BASIC USAGE TESTS -- +-- Check that continuous aggregate and materialized table is dropped +-- together. +CREATE TABLE whatever(time TIMESTAMPTZ NOT NULL, metric INTEGER); +SELECT * FROM create_hypertable('whatever', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 8 | public | whatever | t +(1 row) + +CREATE MATERIALIZED VIEW whatever_summary WITH (timescaledb.continuous) AS +SELECT time_bucket('1 hour', time) AS bucket, avg(metric) + FROM whatever GROUP BY bucket WITH NO DATA; +SELECT (SELECT format('%1$I.%2$I', schema_name, table_name)::regclass::oid + FROM _timescaledb_catalog.hypertable + WHERE id = raw_hypertable_id) AS raw_table + , (SELECT format('%1$I.%2$I', schema_name, table_name)::regclass::oid + FROM _timescaledb_catalog.hypertable + WHERE id = mat_hypertable_id) AS mat_table +FROM _timescaledb_catalog.continuous_agg +WHERE user_view_name = 'whatever_summary' \gset +SELECT relname FROM pg_class WHERE oid = :mat_table; + relname +---------------------------- + _materialized_hypertable_9 +(1 row) + +---------------------------------------------------------------- +-- Should generate an error since the cagg is dependent on the table. +DROP TABLE whatever; +ERROR: cannot drop table whatever because other objects depend on it +---------------------------------------------------------------- +-- Checking that a cagg cannot be dropped if there is a dependent +-- object on it. 
+CREATE VIEW whatever_summary_dependency AS SELECT * FROM whatever_summary; +-- Should generate an error +DROP MATERIALIZED VIEW whatever_summary; +ERROR: cannot drop view whatever_summary because other objects depend on it +-- Dropping the dependent view so that we can do a proper drop below. +DROP VIEW whatever_summary_dependency; +---------------------------------------------------------------- +-- Dropping the cagg should also remove the materialized table +DROP MATERIALIZED VIEW whatever_summary; +SELECT relname FROM pg_class WHERE oid = :mat_table; + relname +--------- +(0 rows) + +---------------------------------------------------------------- +-- Cleanup +DROP TABLE whatever; +-- Check that continuous_agg_invalidation_trigger() handles no arguments properly +SELECT _timescaledb_functions.continuous_agg_invalidation_trigger(); +ERROR: must supply hypertable id +-- END OF BASIC USAGE TESTS -- +CREATE TABLE metrics(time timestamptz, device TEXT, value float); +SELECT table_name FROM create_hypertable('metrics','time'); +NOTICE: adding not-null constraint to column "time" + table_name +------------ + metrics +(1 row) + +INSERT INTO metrics SELECT generate_series('1999-12-20'::timestamptz,'2000-02-01'::timestamptz,'12 day'::interval), 'dev1', 0.25; +SELECT current_setting('timezone'); + current_setting +----------------- + PST8PDT +(1 row) + +-- should be blocked because non-immutable expression +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, current_setting('timezone')) FROM metrics GROUP BY 1; +ERROR: only immutable expressions allowed in time bucket function +\set ON_ERROR_STOP 1 +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, 'PST8PDT') FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg1" +SELECT * FROM cagg1; + time_bucket +------------------------------ + Mon Dec 20 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST +(4 rows) + +CREATE MATERIALIZED VIEW cagg2 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT') FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg2" +SELECT * FROM cagg2; + time_bucket +------------------------------ + Wed Dec 01 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST +(2 rows) + +-- custom origin +CREATE MATERIALIZED VIEW cagg3 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', '2000-01-01'::timestamptz) FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg3" +SELECT * FROM cagg3; + time_bucket +------------------------------ + Wed Dec 01 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST +(2 rows) + +-- offset not supported atm +\set ON_ERROR_STOP 0 +CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; +ERROR: continuous aggregate view must include a valid time bucket function +\set ON_ERROR_STOP 1 diff --git a/tsl/test/expected/compression_bgw.out b/tsl/test/expected/compression_bgw.out index 7e83ac4ce87..fb242b7f10b 100644 --- a/tsl/test/expected/compression_bgw.out +++ b/tsl/test/expected/compression_bgw.out @@ -274,7 +274,6 @@ ERROR: permission denied to start background 
process as role "nologin_role" \set ON_ERROR_STOP 1 DROP TABLE test_table_nologin; RESET ROLE; -REVOKE NOLOGIN_ROLE FROM :ROLE_DEFAULT_PERM_USER; \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER CREATE TABLE conditions( time TIMESTAMPTZ NOT NULL, diff --git a/tsl/test/expected/compression_errors.out b/tsl/test/expected/compression_errors-13.out similarity index 98% rename from tsl/test/expected/compression_errors.out rename to tsl/test/expected/compression_errors-13.out index c4fc5899ebd..457d6151781 100644 --- a/tsl/test/expected/compression_errors.out +++ b/tsl/test/expected/compression_errors-13.out @@ -584,12 +584,12 @@ SELECT 1 FROM test GROUP BY enum_col; 1 (2 rows) -EXPLAIN SELECT DISTINCT 1 FROM test; - QUERY PLAN ----------------------------------------------------------------------------------- - Unique (cost=0.00..50.80 rows=1 width=4) - -> Result (cost=0.00..50.80 rows=2040 width=4) - -> Seq Scan on _hyper_31_19_chunk (cost=0.00..30.40 rows=2040 width=0) +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +-------------------------------------------- + Unique + -> Result + -> Seq Scan on _hyper_31_19_chunk (3 rows) --compress chunks @@ -608,13 +608,13 @@ SELECT 1 FROM test GROUP BY enum_col; 1 (2 rows) -EXPLAIN SELECT DISTINCT 1 FROM test; - QUERY PLAN ------------------------------------------------------------------------------------------------------- - Unique (cost=0.51..21.02 rows=1 width=4) - -> Result (cost=0.51..21.02 rows=2000 width=4) - -> Custom Scan (DecompressChunk) on _hyper_31_19_chunk (cost=0.51..1.02 rows=2000 width=0) - -> Seq Scan on compress_hyper_32_20_chunk (cost=0.00..1.02 rows=2 width=4) +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +----------------------------------------------------------------- + Unique + -> Result + -> Custom Scan (DecompressChunk) on _hyper_31_19_chunk + -> Seq Scan on compress_hyper_32_20_chunk (4 rows) --github issue 4398 diff --git a/tsl/test/expected/compression_errors-14.out b/tsl/test/expected/compression_errors-14.out new file mode 100644 index 00000000000..457d6151781 --- /dev/null +++ b/tsl/test/expected/compression_errors-14.out @@ -0,0 +1,829 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +\set VERBOSITY default +\set ECHO none +--table with special column names -- +create table foo2 (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('foo2', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + foo2 +(1 row) + +create table foo3 (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('foo3', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + foo3 +(1 row) + +create table non_compressed (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('non_compressed', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ table_name +---------------- + non_compressed +(1 row) + +insert into non_compressed values( 3 , 16 , 20, 4); +ALTER TABLE foo2 set (timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); +ERROR: the option timescaledb.compress must be set to true to enable compression +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); +ERROR: cannot use column "c" for both ordering and segmenting +HINT: Use separate columns for the timescaledb.compress_orderby and timescaledb.compress_segmentby options. +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd DESC'); +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd'); +-- this is acceptable: having previously set the default value for orderby +-- and skipping orderby on a subsequent alter command +create table default_skipped (a integer not null, b integer, c integer, d integer); +select create_hypertable('default_skipped', 'a', chunk_time_interval=> 10); + create_hypertable +------------------------------ + (6,public,default_skipped,t) +(1 row) + +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +create table with_rls (a integer, b integer); +ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; +select table_name from create_hypertable('with_rls', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + with_rls +(1 row) + +ALTER TABLE with_rls set (timescaledb.compress, timescaledb.compress_orderby='a'); +ERROR: compression cannot be used on table with row security +--note that the time column "a" should be added to the end of the orderby list +select * from _timescaledb_catalog.hypertable_compression order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+----------+--------------------------+------------------------+----------------------+-------------+-------------------- + 1 | a | 4 | | 2 | f | t + 6 | a | 4 | | 1 | f | t + 6 | b | 4 | | | | + 1 | bacB toD | 0 | 1 | | | + 1 | c | 0 | 2 | | | + 6 | c | 0 | 1 | | | + 1 | d | 4 | | 1 | t | f + 6 | d | 4 | | | | +(8 rows) + +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d DeSc NullS lAsT'); +--shold allow alter since segment by was empty +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d Asc NullS lAsT'); +--this is ok too +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c', timescaledb.compress_orderby = 'd DeSc NullS lAsT'); +-- Negative test cases --- +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c'); +ERROR: must specify a column to order by +DETAIL: The timescaledb.compress_orderby option was previously set and must also be specified in the updated configuration. 
+alter table default_skipped set (timescaledb.compress, timescaledb.compress_orderby = 'a asc', timescaledb.compress_segmentby = 'c'); +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +ERROR: must specify a column to order by +DETAIL: The timescaledb.compress_orderby option was previously set and must also be specified in the updated configuration. +create table reserved_column_prefix (a integer, _ts_meta_foo integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('reserved_column_prefix', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------------------ + reserved_column_prefix +(1 row) + +ALTER TABLE reserved_column_prefix set (timescaledb.compress); +ERROR: cannot compress tables with reserved column prefix '_ts_meta_' +--basic test with count +create table foo (a integer, b integer, c integer, t text, p point); +ALTER TABLE foo ADD CONSTRAINT chk_existing CHECK(b > 0); +select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + foo +(1 row) + +insert into foo values( 3 , 16 , 20); +insert into foo values( 10 , 10 , 20); +insert into foo values( 20 , 11 , 20); +insert into foo values( 30 , 12 , 20); +-- should error out -- +ALTER TABLE foo ALTER b SET NOT NULL, set (timescaledb.compress); +ERROR: ALTER TABLE SET does not support multiple clauses +ALTER TABLE foo ALTER b SET NOT NULL; +select attname, attnotnull from pg_attribute where attrelid = (select oid from pg_class where relname like 'foo') and attname like 'b'; + attname | attnotnull +---------+------------ + b | t +(1 row) + +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); +ERROR: column "d" does not exist +HINT: The timescaledb.compress_segmentby option must reference a valid column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'd'); +ERROR: column "d" does not exist +HINT: The timescaledb.compress_orderby option must reference a valid column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls'); +ERROR: unable to parse ordering option "c desc nulls" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls thirsty'); +ERROR: unable to parse ordering option "c desc nulls thirsty" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c climb nulls first'); +ERROR: unable to parse ordering option "c climb nulls first" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c nulls first asC'); +ERROR: unable to parse ordering option "c nulls first asC" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
+ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls first asc'); +ERROR: unable to parse ordering option "c desc nulls first asc" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc hurry'); +ERROR: unable to parse ordering option "c desc hurry" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); +ERROR: unable to parse ordering option "c descend" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c; SELECT 1'); +ERROR: unable to parse ordering option "c; SELECT 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = '1,2'); +ERROR: unable to parse ordering option "1,2" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c + 1'); +ERROR: unable to parse ordering option "c + 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'random()'); +ERROR: unable to parse ordering option "random()" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c LIMIT 1'); +ERROR: unable to parse ordering option "c LIMIT 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c USING <'); +ERROR: unable to parse ordering option "c USING <" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 't COLLATE "en_US"'); +ERROR: unable to parse ordering option "t COLLATE "en_US"" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); +ERROR: unable to parse segmenting option "c asc" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. 
+ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c nulls last'); +ERROR: unable to parse segmenting option "c nulls last" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + 1'); +ERROR: unable to parse segmenting option "c + 1" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'random()'); +ERROR: unable to parse segmenting option "random()" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c LIMIT 1'); +ERROR: unable to parse segmenting option "c LIMIT 1" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + b'); +ERROR: unable to parse segmenting option "c + b" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p'); +ERROR: invalid ordering column type point +DETAIL: Could not identify a less-than operator for the type. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b'); +ERROR: duplicate column name "b" +HINT: The timescaledb.compress_segmentby option must reference distinct column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b'); +ERROR: duplicate column name "b" +HINT: The timescaledb.compress_orderby option must reference distinct column. +--should succeed +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b'); +--ddl on ht with compression +ALTER TABLE foo DROP COLUMN a; +ERROR: cannot drop column named in partition key +DETAIL: Cannot drop column that is a hypertable partitioning (space or time) dimension. +ALTER TABLE foo DROP COLUMN b; +ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled +ALTER TABLE foo ALTER COLUMN t SET NOT NULL; +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo RESET (timescaledb.compress); +ERROR: compression options cannot be reset +ALTER TABLE foo ADD CONSTRAINT chk CHECK(b > 0); +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo ADD CONSTRAINT chk UNIQUE(b); +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo DROP CONSTRAINT chk_existing; +ERROR: operation not supported on hypertables that have compression enabled +--note that the time column "a" should not be added to the end of the order by list again (should appear first) +select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _timescaledb_catalog.hypertable h on (h.id = hc.hypertable_id) where h.table_name = 'foo' order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+---------+--------------------------+------------------------+----------------------+-------------+-------------------- + 15 | a | 4 | | 1 | t | f + 15 | b | 4 | | 2 | t | f + 15 | c | 4 | | | | + 15 | p | 1 | | | | + 15 | t | 2 | | | | +(5 rows) + +select decompress_chunk(ch1.schema_name|| '.' 
|| ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1; +ERROR: chunk "_hyper_15_2_chunk" is not compressed +--test changing the segment by columns +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b'); +select ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1 \gset +select decompress_chunk(:'CHUNK_NAME'); +ERROR: chunk "_hyper_15_2_chunk" is not compressed +select decompress_chunk(:'CHUNK_NAME', if_compressed=>true); +NOTICE: chunk "_hyper_15_2_chunk" is not compressed + decompress_chunk +------------------ + +(1 row) + +--should succeed +select compress_chunk(:'CHUNK_NAME'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +select compress_chunk(:'CHUNK_NAME'); +ERROR: chunk "_hyper_15_2_chunk" is already compressed +select compress_chunk(:'CHUNK_NAME', if_not_compressed=>true); +NOTICE: chunk "_hyper_15_2_chunk" is already compressed + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +select compress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' ORDER BY ch1.id limit 1; +ERROR: compression not enabled on "non_compressed" +DETAIL: It is not possible to compress chunks on a hypertable or continuous aggregate that does not have compression enabled. +HINT: Enable compression using ALTER TABLE/MATERIALIZED VIEW with the timescaledb.compress option. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'c'); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +ALTER TABLE foo set (timescaledb.compress='f'); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +ALTER TABLE foo reset (timescaledb.compress); +ERROR: compression options cannot be reset +select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' ORDER BY ch1.id limit 1; +ERROR: missing compressed hypertable +--should succeed +select decompress_chunk(ch1.schema_name|| '.' 
|| ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' and ch1.compressed_chunk_id IS NOT NULL; + decompress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +--should succeed +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b'); +select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _timescaledb_catalog.hypertable h on (h.id = hc.hypertable_id) where h.table_name = 'foo' order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+---------+--------------------------+------------------------+----------------------+-------------+-------------------- + 15 | a | 4 | | 1 | t | f + 15 | b | 0 | 1 | | | + 15 | c | 4 | | | | + 15 | p | 1 | | | | + 15 | t | 2 | | | | +(5 rows) + +SELECT comp_hyper.schema_name|| '.' || comp_hyper.table_name as "COMPRESSED_HYPER_NAME" +FROM _timescaledb_catalog.hypertable comp_hyper +INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id) +WHERE uncomp_hyper.table_name like 'foo' ORDER BY comp_hyper.id LIMIT 1 \gset +select add_retention_policy(:'COMPRESSED_HYPER_NAME', INTERVAL '4 months', true); +ERROR: cannot add retention policy to compressed hypertable "_compressed_hypertable_18" +HINT: Please add the policy to the corresponding uncompressed hypertable instead. +--Constraint checking for compression +create table fortable(col integer primary key); +create table table_constr( device_id integer, + timec integer , + location integer , + c integer constraint valid_cval check (c > 20) , + d integer, + primary key ( device_id, timec) +); +select table_name from create_hypertable('table_constr', 'timec', chunk_time_interval=> 10); + table_name +-------------- + table_constr +(1 row) + +BEGIN; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); +WARNING: column "device_id" should be used for segmenting or ordering +ROLLBACK; +alter table table_constr add constraint table_constr_uk unique (location, timec, device_id); +BEGIN; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id'); +WARNING: column "location" should be used for segmenting or ordering +ROLLBACK; +alter table table_constr add constraint table_constr_fk FOREIGN KEY(d) REFERENCES fortable(col) on delete cascade; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location'); +ERROR: column "d" must be used for segmenting +DETAIL: The foreign key constraint "table_constr_fk" cannot be enforced with the given compression configuration. +--exclusion constraints not allowed +alter table table_constr add constraint table_constr_exclu exclude using btree (timec with = ); +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location, d'); +ERROR: constraint table_constr_exclu is not supported for compression +HINT: Exclusion constraints are not supported on hypertables that are compressed. 
+alter table table_constr drop constraint table_constr_exclu ; +--now it works +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location, d'); +--can't add fks after compression enabled +alter table table_constr add constraint table_constr_fk_add_after FOREIGN KEY(d) REFERENCES fortable(col) on delete cascade; +ERROR: operation not supported on hypertables that have compression enabled +-- ddl ADD column variants that are not supported +ALTER TABLE table_constr ADD COLUMN newcol integer CHECK ( newcol < 10 ); +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer UNIQUE; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer PRIMARY KEY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer NOT NULL; +ERROR: cannot add column with NOT NULL constraint without default to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer DEFAULT random() + random(); +ERROR: cannot add column with non-constant default expression to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer REFERENCES fortable(col); +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer GENERATED ALWAYS AS IDENTITY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer GENERATED BY DEFAULT AS IDENTITY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol nonexistent_type; +ERROR: type "nonexistent_type" does not exist +LINE 1: ALTER TABLE table_constr ADD COLUMN newcol nonexistent_type; + ^ +--FK check should not error even with dropped columns (previously had a bug related to this) +CREATE TABLE table_fk ( + time timestamptz NOT NULL, + id1 int8 NOT NULL, + id2 int8 NOT NULL, + value float8 NULL, + CONSTRAINT fk1 FOREIGN KEY (id1) REFERENCES fortable(col), + CONSTRAINT fk2 FOREIGN KEY (id2) REFERENCES fortable(col) +); +SELECT create_hypertable('table_fk', 'time'); + create_hypertable +------------------------ + (23,public,table_fk,t) +(1 row) + +ALTER TABLE table_fk DROP COLUMN id1; +ALTER TABLE table_fk SET (timescaledb.compress,timescaledb.compress_segmentby = 'id2'); +-- TEST fk cascade delete behavior on compressed chunk -- +insert into fortable values(1); +insert into fortable values(10); +--we want 2 chunks here -- +insert into table_constr values(1000, 1, 44, 44, 1); +insert into table_constr values(1000, 10, 44, 44, 10); +select ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +where ch1.hypertable_id = ht.id and ht.table_name like 'table_constr' +ORDER BY ch1.id limit 1 \gset +-- we have 1 compressed and 1 uncompressed chunk after this. 
+select compress_chunk(:'CHUNK_NAME'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_19_7_chunk +(1 row) + +SELECT total_chunks , number_compressed_chunks +FROM hypertable_compression_stats('table_constr'); + total_chunks | number_compressed_chunks +--------------+-------------------------- + 2 | 1 +(1 row) + +--github issue 1661 +--disable compression after enabling it on a table that has fk constraints +CREATE TABLE table_constr2( device_id integer, + timec integer , + location integer , + d integer references fortable(col), + primary key ( device_id, timec) +); +SELECT table_name from create_hypertable('table_constr2', 'timec', chunk_time_interval=> 10); + table_name +--------------- + table_constr2 +(1 row) + +INSERT INTO fortable VALUES( 99 ); +INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); +ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); +ERROR: column "d" must be used for segmenting +DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. + ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +--compress a chunk and try to disable compression, it should fail -- +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id and ht.table_name like 'table_constr2' \gset +SELECT compress_chunk(:'CHUNK_NAME'); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_10_chunk +(1 row) + +ALTER TABLE table_constr2 set (timescaledb.compress=false); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +--decompress all chunks and disable compression. +SELECT decompress_chunk(:'CHUNK_NAME'); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_10_chunk +(1 row) + +ALTER TABLE table_constr2 SET (timescaledb.compress=false); +-- TEST compression policy +-- modify the config to trigger errors at runtime +CREATE TABLE test_table_int(time bigint, val int); +SELECT create_hypertable('test_table_int', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" +DETAIL: Dimensions cannot have NULL values. 
+ create_hypertable +------------------------------ + (27,public,test_table_int,t) +(1 row) + +CREATE OR REPLACE function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT'; +SELECT set_integer_now_func('test_table_int', 'dummy_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_int SELECT generate_series(1,5), 10; +ALTER TABLE test_table_int set (timescaledb.compress); +SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id +\gset +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_config.bgw_job +SET config = config - 'compress_after' +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +----------------------- + {"hypertable_id": 27} +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1000 config must have compress_after +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 35 at RAISE +SELECT remove_compression_policy('test_table_int'); + remove_compression_policy +--------------------------- + t +(1 row) + +--again add a new policy that we'll tamper with +SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id +\gset +UPDATE _timescaledb_config.bgw_job +SET config = config - 'hypertable_id' +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +----------------------- + {"compress_after": 2} +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1001 config must have hypertable_id +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 26 at RAISE +UPDATE _timescaledb_config.bgw_job +SET config = NULL +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +-------- + +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1001 has null config +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 21 at RAISE +-- test ADD COLUMN IF NOT EXISTS +CREATE TABLE metric (time TIMESTAMPTZ NOT NULL, val FLOAT8 NOT NULL, dev_id INT4 NOT NULL); +SELECT create_hypertable('metric', 'time', 'dev_id', 10); + create_hypertable +---------------------- + (29,public,metric,t) +(1 row) + +ALTER TABLE metric SET ( +timescaledb.compress, +timescaledb.compress_segmentby = 'dev_id', +timescaledb.compress_orderby = 'time DESC' +); +INSERT INTO metric(time, val, dev_id) +SELECT s.*, 3.14+1, 1 +FROM generate_series('2021-08-17 00:00:00'::timestamp, + '2021-08-17 00:02:00'::timestamp, '1 s'::interval) s; +SELECT compress_chunk(show_chunks('metric')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_29_17_chunk +(1 row) + +-- column does not exist the first time +ALTER TABLE metric ADD COLUMN IF NOT EXISTS "medium" VARCHAR ; +-- column already exists the second time +ALTER TABLE metric ADD COLUMN IF NOT EXISTS "medium" VARCHAR ; +NOTICE: column "medium" of relation "metric" already exists, skipping +-- also add one without IF NOT EXISTS +ALTER TABLE metric ADD COLUMN "medium_1" VARCHAR ; +ALTER TABLE metric ADD COLUMN "medium_1" VARCHAR ; +ERROR: column "medium_1" of relation "metric" already exists +--github issue 3481 +--GROUP BY error when setting compress_segmentby with an enum column +CREATE TYPE an_enum_type AS ENUM ('home', 'school'); +CREATE TABLE test ( + time timestamp NOT NULL, + enum_col an_enum_type NOT NULL +); +SELECT 
create_hypertable( + 'test', 'time' +); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +HINT: Use datatype TIMESTAMPTZ instead. + create_hypertable +-------------------- + (31,public,test,t) +(1 row) + +INSERT INTO test VALUES ('2001-01-01 00:00', 'home'), + ('2001-01-01 01:00', 'school'), + ('2001-01-01 02:00', 'home'); +--enable compression on enum_col +ALTER TABLE test SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'enum_col', + timescaledb.compress_orderby = 'time' +); +--below queries will pass before chunks are compressed +SELECT 1 FROM test GROUP BY enum_col; + ?column? +---------- + 1 + 1 +(2 rows) + +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +-------------------------------------------- + Unique + -> Result + -> Seq Scan on _hyper_31_19_chunk +(3 rows) + +--compress chunks +SELECT COMPRESS_CHUNK(X) FROM SHOW_CHUNKS('test') X; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_31_19_chunk +(1 row) + +ANALYZE test; +--below query should pass after chunks are compressed +SELECT 1 FROM test GROUP BY enum_col; + ?column? +---------- + 1 + 1 +(2 rows) + +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +----------------------------------------------------------------- + Unique + -> Result + -> Custom Scan (DecompressChunk) on _hyper_31_19_chunk + -> Seq Scan on compress_hyper_32_20_chunk +(4 rows) + +--github issue 4398 +SELECT format('CREATE TABLE data_table AS SELECT now() AS tm, %s', array_to_string(array_agg(format('125 AS c%s',a)), ', ')) FROM generate_series(1,550)a \gexec +CREATE TABLE data_table AS SELECT now() AS tm, 125 AS c1, 125 AS c2, 125 AS c3, 125 AS c4, 125 AS c5, 125 AS c6, 125 AS c7, 125 AS c8, 125 AS c9, 125 AS c10, 125 AS c11, 125 AS c12, 125 AS c13, 125 AS c14, 125 AS c15, 125 AS c16, 125 AS c17, 125 AS c18, 125 AS c19, 125 AS c20, 125 AS c21, 125 AS c22, 125 AS c23, 125 AS c24, 125 AS c25, 125 AS c26, 125 AS c27, 125 AS c28, 125 AS c29, 125 AS c30, 125 AS c31, 125 AS c32, 125 AS c33, 125 AS c34, 125 AS c35, 125 AS c36, 125 AS c37, 125 AS c38, 125 AS c39, 125 AS c40, 125 AS c41, 125 AS c42, 125 AS c43, 125 AS c44, 125 AS c45, 125 AS c46, 125 AS c47, 125 AS c48, 125 AS c49, 125 AS c50, 125 AS c51, 125 AS c52, 125 AS c53, 125 AS c54, 125 AS c55, 125 AS c56, 125 AS c57, 125 AS c58, 125 AS c59, 125 AS c60, 125 AS c61, 125 AS c62, 125 AS c63, 125 AS c64, 125 AS c65, 125 AS c66, 125 AS c67, 125 AS c68, 125 AS c69, 125 AS c70, 125 AS c71, 125 AS c72, 125 AS c73, 125 AS c74, 125 AS c75, 125 AS c76, 125 AS c77, 125 AS c78, 125 AS c79, 125 AS c80, 125 AS c81, 125 AS c82, 125 AS c83, 125 AS c84, 125 AS c85, 125 AS c86, 125 AS c87, 125 AS c88, 125 AS c89, 125 AS c90, 125 AS c91, 125 AS c92, 125 AS c93, 125 AS c94, 125 AS c95, 125 AS c96, 125 AS c97, 125 AS c98, 125 AS c99, 125 AS c100, 125 AS c101, 125 AS c102, 125 AS c103, 125 AS c104, 125 AS c105, 125 AS c106, 125 AS c107, 125 AS c108, 125 AS c109, 125 AS c110, 125 AS c111, 125 AS c112, 125 AS c113, 125 AS c114, 125 AS c115, 125 AS c116, 125 AS c117, 125 AS c118, 125 AS c119, 125 AS c120, 125 AS c121, 125 AS c122, 125 AS c123, 125 AS c124, 125 AS c125, 125 AS c126, 125 AS c127, 125 AS c128, 125 AS c129, 125 AS c130, 125 AS c131, 125 AS c132, 125 AS c133, 125 AS c134, 125 AS c135, 125 AS c136, 125 AS c137, 125 AS c138, 125 AS c139, 125 AS c140, 125 AS c141, 125 AS c142, 125 AS c143, 125 AS c144, 125 AS c145, 125 AS c146, 125 AS c147, 
125 AS c148, 125 AS c149, 125 AS c150, 125 AS c151, 125 AS c152, 125 AS c153, 125 AS c154, 125 AS c155, 125 AS c156, 125 AS c157, 125 AS c158, 125 AS c159, 125 AS c160, 125 AS c161, 125 AS c162, 125 AS c163, 125 AS c164, 125 AS c165, 125 AS c166, 125 AS c167, 125 AS c168, 125 AS c169, 125 AS c170, 125 AS c171, 125 AS c172, 125 AS c173, 125 AS c174, 125 AS c175, 125 AS c176, 125 AS c177, 125 AS c178, 125 AS c179, 125 AS c180, 125 AS c181, 125 AS c182, 125 AS c183, 125 AS c184, 125 AS c185, 125 AS c186, 125 AS c187, 125 AS c188, 125 AS c189, 125 AS c190, 125 AS c191, 125 AS c192, 125 AS c193, 125 AS c194, 125 AS c195, 125 AS c196, 125 AS c197, 125 AS c198, 125 AS c199, 125 AS c200, 125 AS c201, 125 AS c202, 125 AS c203, 125 AS c204, 125 AS c205, 125 AS c206, 125 AS c207, 125 AS c208, 125 AS c209, 125 AS c210, 125 AS c211, 125 AS c212, 125 AS c213, 125 AS c214, 125 AS c215, 125 AS c216, 125 AS c217, 125 AS c218, 125 AS c219, 125 AS c220, 125 AS c221, 125 AS c222, 125 AS c223, 125 AS c224, 125 AS c225, 125 AS c226, 125 AS c227, 125 AS c228, 125 AS c229, 125 AS c230, 125 AS c231, 125 AS c232, 125 AS c233, 125 AS c234, 125 AS c235, 125 AS c236, 125 AS c237, 125 AS c238, 125 AS c239, 125 AS c240, 125 AS c241, 125 AS c242, 125 AS c243, 125 AS c244, 125 AS c245, 125 AS c246, 125 AS c247, 125 AS c248, 125 AS c249, 125 AS c250, 125 AS c251, 125 AS c252, 125 AS c253, 125 AS c254, 125 AS c255, 125 AS c256, 125 AS c257, 125 AS c258, 125 AS c259, 125 AS c260, 125 AS c261, 125 AS c262, 125 AS c263, 125 AS c264, 125 AS c265, 125 AS c266, 125 AS c267, 125 AS c268, 125 AS c269, 125 AS c270, 125 AS c271, 125 AS c272, 125 AS c273, 125 AS c274, 125 AS c275, 125 AS c276, 125 AS c277, 125 AS c278, 125 AS c279, 125 AS c280, 125 AS c281, 125 AS c282, 125 AS c283, 125 AS c284, 125 AS c285, 125 AS c286, 125 AS c287, 125 AS c288, 125 AS c289, 125 AS c290, 125 AS c291, 125 AS c292, 125 AS c293, 125 AS c294, 125 AS c295, 125 AS c296, 125 AS c297, 125 AS c298, 125 AS c299, 125 AS c300, 125 AS c301, 125 AS c302, 125 AS c303, 125 AS c304, 125 AS c305, 125 AS c306, 125 AS c307, 125 AS c308, 125 AS c309, 125 AS c310, 125 AS c311, 125 AS c312, 125 AS c313, 125 AS c314, 125 AS c315, 125 AS c316, 125 AS c317, 125 AS c318, 125 AS c319, 125 AS c320, 125 AS c321, 125 AS c322, 125 AS c323, 125 AS c324, 125 AS c325, 125 AS c326, 125 AS c327, 125 AS c328, 125 AS c329, 125 AS c330, 125 AS c331, 125 AS c332, 125 AS c333, 125 AS c334, 125 AS c335, 125 AS c336, 125 AS c337, 125 AS c338, 125 AS c339, 125 AS c340, 125 AS c341, 125 AS c342, 125 AS c343, 125 AS c344, 125 AS c345, 125 AS c346, 125 AS c347, 125 AS c348, 125 AS c349, 125 AS c350, 125 AS c351, 125 AS c352, 125 AS c353, 125 AS c354, 125 AS c355, 125 AS c356, 125 AS c357, 125 AS c358, 125 AS c359, 125 AS c360, 125 AS c361, 125 AS c362, 125 AS c363, 125 AS c364, 125 AS c365, 125 AS c366, 125 AS c367, 125 AS c368, 125 AS c369, 125 AS c370, 125 AS c371, 125 AS c372, 125 AS c373, 125 AS c374, 125 AS c375, 125 AS c376, 125 AS c377, 125 AS c378, 125 AS c379, 125 AS c380, 125 AS c381, 125 AS c382, 125 AS c383, 125 AS c384, 125 AS c385, 125 AS c386, 125 AS c387, 125 AS c388, 125 AS c389, 125 AS c390, 125 AS c391, 125 AS c392, 125 AS c393, 125 AS c394, 125 AS c395, 125 AS c396, 125 AS c397, 125 AS c398, 125 AS c399, 125 AS c400, 125 AS c401, 125 AS c402, 125 AS c403, 125 AS c404, 125 AS c405, 125 AS c406, 125 AS c407, 125 AS c408, 125 AS c409, 125 AS c410, 125 AS c411, 125 AS c412, 125 AS c413, 125 AS c414, 125 AS c415, 125 AS c416, 125 AS c417, 125 AS c418, 125 AS c419, 125 AS c420, 125 
AS c421, 125 AS c422, 125 AS c423, 125 AS c424, 125 AS c425, 125 AS c426, 125 AS c427, 125 AS c428, 125 AS c429, 125 AS c430, 125 AS c431, 125 AS c432, 125 AS c433, 125 AS c434, 125 AS c435, 125 AS c436, 125 AS c437, 125 AS c438, 125 AS c439, 125 AS c440, 125 AS c441, 125 AS c442, 125 AS c443, 125 AS c444, 125 AS c445, 125 AS c446, 125 AS c447, 125 AS c448, 125 AS c449, 125 AS c450, 125 AS c451, 125 AS c452, 125 AS c453, 125 AS c454, 125 AS c455, 125 AS c456, 125 AS c457, 125 AS c458, 125 AS c459, 125 AS c460, 125 AS c461, 125 AS c462, 125 AS c463, 125 AS c464, 125 AS c465, 125 AS c466, 125 AS c467, 125 AS c468, 125 AS c469, 125 AS c470, 125 AS c471, 125 AS c472, 125 AS c473, 125 AS c474, 125 AS c475, 125 AS c476, 125 AS c477, 125 AS c478, 125 AS c479, 125 AS c480, 125 AS c481, 125 AS c482, 125 AS c483, 125 AS c484, 125 AS c485, 125 AS c486, 125 AS c487, 125 AS c488, 125 AS c489, 125 AS c490, 125 AS c491, 125 AS c492, 125 AS c493, 125 AS c494, 125 AS c495, 125 AS c496, 125 AS c497, 125 AS c498, 125 AS c499, 125 AS c500, 125 AS c501, 125 AS c502, 125 AS c503, 125 AS c504, 125 AS c505, 125 AS c506, 125 AS c507, 125 AS c508, 125 AS c509, 125 AS c510, 125 AS c511, 125 AS c512, 125 AS c513, 125 AS c514, 125 AS c515, 125 AS c516, 125 AS c517, 125 AS c518, 125 AS c519, 125 AS c520, 125 AS c521, 125 AS c522, 125 AS c523, 125 AS c524, 125 AS c525, 125 AS c526, 125 AS c527, 125 AS c528, 125 AS c529, 125 AS c530, 125 AS c531, 125 AS c532, 125 AS c533, 125 AS c534, 125 AS c535, 125 AS c536, 125 AS c537, 125 AS c538, 125 AS c539, 125 AS c540, 125 AS c541, 125 AS c542, 125 AS c543, 125 AS c544, 125 AS c545, 125 AS c546, 125 AS c547, 125 AS c548, 125 AS c549, 125 AS c550 +CREATE TABLE ts_table (LIKE data_table); +SELECT * FROM create_hypertable('ts_table', 'tm'); +NOTICE: adding not-null constraint to column "tm" +DETAIL: Dimensions cannot have NULL values. 
+ hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 33 | public | ts_table | t +(1 row) + +--should report a warning +\set VERBOSITY terse +ALTER TABLE ts_table SET(timescaledb.compress, timescaledb.compress_segmentby = 'c1', + timescaledb.compress_orderby = 'tm'); +WARNING: compressed row size might exceed maximum row size +INSERT INTO ts_table SELECT * FROM data_table; +--cleanup tables +DROP TABLE data_table cascade; +DROP TABLE ts_table cascade; +-- #5458 invalid reads for row expressions after column dropped on compressed tables +CREATE TABLE readings( + "time" TIMESTAMPTZ NOT NULL, + battery_status TEXT, + battery_temperature DOUBLE PRECISION +); +INSERT INTO readings ("time") VALUES ('2022-11-11 11:11:11-00'); +SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour', migrate_data=>true); +NOTICE: migrating data to chunks + create_hypertable +------------------------ + (35,public,readings,t) +(1 row) + +create unique index readings_uniq_idx on readings("time",battery_temperature); +ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature'); +SELECT compress_chunk(show_chunks('readings')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk +(1 row) + +ALTER TABLE readings DROP COLUMN battery_status; +INSERT INTO readings ("time", battery_temperature) VALUES ('2022-11-11 11:11:11', 0.2); +SELECT readings FROM readings; + readings +-------------------------------------- + ("Fri Nov 11 03:11:11 2022 PST",) + ("Fri Nov 11 11:11:11 2022 PST",0.2) +(2 rows) + +-- #5577 On-insert decompression after schema changes may not work properly +SELECT decompress_chunk(show_chunks('readings'),true); +NOTICE: chunk "_hyper_35_24_chunk" is not compressed + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + +(2 rows) + +SELECT compress_chunk(show_chunks('readings'),true); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + _timescaledb_internal._hyper_35_24_chunk +(2 rows) + +\set ON_ERROR_STOP 0 +INSERT INTO readings ("time", battery_temperature) VALUES + ('2022-11-11 11:11:11',0.2) -- same record as inserted +; +ERROR: duplicate key value violates unique constraint "_hyper_35_24_chunk_readings_uniq_idx" +\set ON_ERROR_STOP 1 +SELECT * from readings; + time | battery_temperature +------------------------------+--------------------- + Fri Nov 11 03:11:11 2022 PST | + Fri Nov 11 11:11:11 2022 PST | 0.2 +(2 rows) + +SELECT assert_equal(count(1), 2::bigint) FROM readings; + assert_equal +-------------- + +(1 row) + +-- no unique check failure during decompression +SELECT decompress_chunk(show_chunks('readings'),true); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + _timescaledb_internal._hyper_35_24_chunk +(2 rows) + +-- #5553 Unique constraints are not always respected on compressed tables +CREATE TABLE main_table AS +SELECT '2011-11-11 11:11:11'::timestamptz AS time, 'foo' AS device_id; +CREATE UNIQUE INDEX xm ON main_table(time, device_id); +SELECT create_hypertable('main_table', 'time', chunk_time_interval => interval '12 hour', migrate_data => TRUE); +NOTICE: adding not-null constraint to column "time" +NOTICE: migrating data to chunks + create_hypertable +-------------------------- + (37,public,main_table,t) +(1 row) + +ALTER TABLE main_table SET ( + 
timescaledb.compress, + timescaledb.compress_segmentby = 'device_id', + timescaledb.compress_orderby = ''); +SELECT compress_chunk(show_chunks('main_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_27_chunk +(1 row) + +-- insert rejected +\set ON_ERROR_STOP 0 +INSERT INTO main_table VALUES + ('2011-11-11 11:11:11', 'foo'); +ERROR: duplicate key value violates unique constraint "_hyper_37_27_chunk_xm" +-- insert rejected in case 1st row doesn't violate constraint with different segmentby +INSERT INTO main_table VALUES + ('2011-11-11 11:12:11', 'bar'), + ('2011-11-11 11:11:11', 'foo'); +ERROR: duplicate key value violates unique constraint "_hyper_37_27_chunk_xm" +\set ON_ERROR_STOP 1 +SELECT assert_equal(count(1), 1::bigint) FROM main_table; + assert_equal +-------------- + +(1 row) + +-- no unique check failure during decompression +SELECT decompress_chunk(show_chunks('main_table'), TRUE); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_27_chunk +(1 row) + +DROP TABLE IF EXISTS readings; +CREATE TABLE readings( + "time" timestamptz NOT NULL, + battery_status text, + candy integer, + battery_status2 text, + battery_temperature text +); +SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour'); + create_hypertable +------------------------ + (39,public,readings,t) +(1 row) + +CREATE UNIQUE INDEX readings_uniq_idx ON readings("time", battery_temperature); +ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'battery_temperature'); +ALTER TABLE readings DROP COLUMN battery_status; +ALTER TABLE readings DROP COLUMN battery_status2; +INSERT INTO readings("time", candy, battery_temperature) + VALUES ('2022-11-11 11:11:11', 88, '0.2'); +SELECT compress_chunk(show_chunks('readings'), TRUE); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_39_29_chunk +(1 row) + +-- no error happens +INSERT INTO readings("time", candy, battery_temperature) + VALUES ('2022-11-11 11:11:11', 33, 0.3) +; +-- Segmentby checks should be done for unique indexes without +-- constraints, so create a table without constraints and add a unique +-- index and try to create a table without using the right segmentby +-- column. 
+CREATE TABLE table_unique_index( + location smallint not null, + device_id smallint not null, + time timestamptz not null, + value float8 not null +); +CREATE UNIQUE index ON table_unique_index(location, device_id, time); +SELECT table_name FROM create_hypertable('table_unique_index', 'time'); + table_name +-------------------- + table_unique_index +(1 row) + +-- Will warn because the lack of segmentby/orderby compression options +ALTER TABLE table_unique_index SET (timescaledb.compress); +WARNING: column "location" should be used for segmenting or ordering +WARNING: column "device_id" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); +WARNING: column "device_id" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id'); +WARNING: column "location" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +-- Will enable compression without warnings +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location,device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); diff --git a/tsl/test/expected/compression_errors-15.out b/tsl/test/expected/compression_errors-15.out new file mode 100644 index 00000000000..457d6151781 --- /dev/null +++ b/tsl/test/expected/compression_errors-15.out @@ -0,0 +1,829 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +\set VERBOSITY default +\set ECHO none +--table with special column names -- +create table foo2 (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('foo2', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + foo2 +(1 row) + +create table foo3 (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('foo3', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ table_name +------------ + foo3 +(1 row) + +create table non_compressed (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('non_compressed', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +---------------- + non_compressed +(1 row) + +insert into non_compressed values( 3 , 16 , 20, 4); +ALTER TABLE foo2 set (timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); +ERROR: the option timescaledb.compress must be set to true to enable compression +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); +ERROR: cannot use column "c" for both ordering and segmenting +HINT: Use separate columns for the timescaledb.compress_orderby and timescaledb.compress_segmentby options. +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd DESC'); +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd'); +-- this is acceptable: having previously set the default value for orderby +-- and skipping orderby on a subsequent alter command +create table default_skipped (a integer not null, b integer, c integer, d integer); +select create_hypertable('default_skipped', 'a', chunk_time_interval=> 10); + create_hypertable +------------------------------ + (6,public,default_skipped,t) +(1 row) + +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +create table with_rls (a integer, b integer); +ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; +select table_name from create_hypertable('with_rls', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ table_name +------------ + with_rls +(1 row) + +ALTER TABLE with_rls set (timescaledb.compress, timescaledb.compress_orderby='a'); +ERROR: compression cannot be used on table with row security +--note that the time column "a" should be added to the end of the orderby list +select * from _timescaledb_catalog.hypertable_compression order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+----------+--------------------------+------------------------+----------------------+-------------+-------------------- + 1 | a | 4 | | 2 | f | t + 6 | a | 4 | | 1 | f | t + 6 | b | 4 | | | | + 1 | bacB toD | 0 | 1 | | | + 1 | c | 0 | 2 | | | + 6 | c | 0 | 1 | | | + 1 | d | 4 | | 1 | t | f + 6 | d | 4 | | | | +(8 rows) + +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d DeSc NullS lAsT'); +--shold allow alter since segment by was empty +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d Asc NullS lAsT'); +--this is ok too +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c', timescaledb.compress_orderby = 'd DeSc NullS lAsT'); +-- Negative test cases --- +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c'); +ERROR: must specify a column to order by +DETAIL: The timescaledb.compress_orderby option was previously set and must also be specified in the updated configuration. +alter table default_skipped set (timescaledb.compress, timescaledb.compress_orderby = 'a asc', timescaledb.compress_segmentby = 'c'); +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +ERROR: must specify a column to order by +DETAIL: The timescaledb.compress_orderby option was previously set and must also be specified in the updated configuration. +create table reserved_column_prefix (a integer, _ts_meta_foo integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('reserved_column_prefix', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------------------ + reserved_column_prefix +(1 row) + +ALTER TABLE reserved_column_prefix set (timescaledb.compress); +ERROR: cannot compress tables with reserved column prefix '_ts_meta_' +--basic test with count +create table foo (a integer, b integer, c integer, t text, p point); +ALTER TABLE foo ADD CONSTRAINT chk_existing CHECK(b > 0); +select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ table_name +------------ + foo +(1 row) + +insert into foo values( 3 , 16 , 20); +insert into foo values( 10 , 10 , 20); +insert into foo values( 20 , 11 , 20); +insert into foo values( 30 , 12 , 20); +-- should error out -- +ALTER TABLE foo ALTER b SET NOT NULL, set (timescaledb.compress); +ERROR: ALTER TABLE SET does not support multiple clauses +ALTER TABLE foo ALTER b SET NOT NULL; +select attname, attnotnull from pg_attribute where attrelid = (select oid from pg_class where relname like 'foo') and attname like 'b'; + attname | attnotnull +---------+------------ + b | t +(1 row) + +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); +ERROR: column "d" does not exist +HINT: The timescaledb.compress_segmentby option must reference a valid column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'd'); +ERROR: column "d" does not exist +HINT: The timescaledb.compress_orderby option must reference a valid column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls'); +ERROR: unable to parse ordering option "c desc nulls" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls thirsty'); +ERROR: unable to parse ordering option "c desc nulls thirsty" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c climb nulls first'); +ERROR: unable to parse ordering option "c climb nulls first" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c nulls first asC'); +ERROR: unable to parse ordering option "c nulls first asC" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls first asc'); +ERROR: unable to parse ordering option "c desc nulls first asc" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc hurry'); +ERROR: unable to parse ordering option "c desc hurry" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); +ERROR: unable to parse ordering option "c descend" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c; SELECT 1'); +ERROR: unable to parse ordering option "c; SELECT 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
+ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = '1,2'); +ERROR: unable to parse ordering option "1,2" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c + 1'); +ERROR: unable to parse ordering option "c + 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'random()'); +ERROR: unable to parse ordering option "random()" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c LIMIT 1'); +ERROR: unable to parse ordering option "c LIMIT 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c USING <'); +ERROR: unable to parse ordering option "c USING <" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 't COLLATE "en_US"'); +ERROR: unable to parse ordering option "t COLLATE "en_US"" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); +ERROR: unable to parse segmenting option "c asc" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c nulls last'); +ERROR: unable to parse segmenting option "c nulls last" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + 1'); +ERROR: unable to parse segmenting option "c + 1" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'random()'); +ERROR: unable to parse segmenting option "random()" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c LIMIT 1'); +ERROR: unable to parse segmenting option "c LIMIT 1" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + b'); +ERROR: unable to parse segmenting option "c + b" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p'); +ERROR: invalid ordering column type point +DETAIL: Could not identify a less-than operator for the type. 
+ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b'); +ERROR: duplicate column name "b" +HINT: The timescaledb.compress_segmentby option must reference distinct column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b'); +ERROR: duplicate column name "b" +HINT: The timescaledb.compress_orderby option must reference distinct column. +--should succeed +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b'); +--ddl on ht with compression +ALTER TABLE foo DROP COLUMN a; +ERROR: cannot drop column named in partition key +DETAIL: Cannot drop column that is a hypertable partitioning (space or time) dimension. +ALTER TABLE foo DROP COLUMN b; +ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled +ALTER TABLE foo ALTER COLUMN t SET NOT NULL; +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo RESET (timescaledb.compress); +ERROR: compression options cannot be reset +ALTER TABLE foo ADD CONSTRAINT chk CHECK(b > 0); +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo ADD CONSTRAINT chk UNIQUE(b); +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo DROP CONSTRAINT chk_existing; +ERROR: operation not supported on hypertables that have compression enabled +--note that the time column "a" should not be added to the end of the order by list again (should appear first) +select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _timescaledb_catalog.hypertable h on (h.id = hc.hypertable_id) where h.table_name = 'foo' order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+---------+--------------------------+------------------------+----------------------+-------------+-------------------- + 15 | a | 4 | | 1 | t | f + 15 | b | 4 | | 2 | t | f + 15 | c | 4 | | | | + 15 | p | 1 | | | | + 15 | t | 2 | | | | +(5 rows) + +select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1; +ERROR: chunk "_hyper_15_2_chunk" is not compressed +--test changing the segment by columns +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b'); +select ch1.schema_name|| '.' 
|| ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1 \gset +select decompress_chunk(:'CHUNK_NAME'); +ERROR: chunk "_hyper_15_2_chunk" is not compressed +select decompress_chunk(:'CHUNK_NAME', if_compressed=>true); +NOTICE: chunk "_hyper_15_2_chunk" is not compressed + decompress_chunk +------------------ + +(1 row) + +--should succeed +select compress_chunk(:'CHUNK_NAME'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +select compress_chunk(:'CHUNK_NAME'); +ERROR: chunk "_hyper_15_2_chunk" is already compressed +select compress_chunk(:'CHUNK_NAME', if_not_compressed=>true); +NOTICE: chunk "_hyper_15_2_chunk" is already compressed + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +select compress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' ORDER BY ch1.id limit 1; +ERROR: compression not enabled on "non_compressed" +DETAIL: It is not possible to compress chunks on a hypertable or continuous aggregate that does not have compression enabled. +HINT: Enable compression using ALTER TABLE/MATERIALIZED VIEW with the timescaledb.compress option. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'c'); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +ALTER TABLE foo set (timescaledb.compress='f'); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +ALTER TABLE foo reset (timescaledb.compress); +ERROR: compression options cannot be reset +select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' ORDER BY ch1.id limit 1; +ERROR: missing compressed hypertable +--should succeed +select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' and ch1.compressed_chunk_id IS NOT NULL; + decompress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +--should succeed +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b'); +select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _timescaledb_catalog.hypertable h on (h.id = hc.hypertable_id) where h.table_name = 'foo' order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+---------+--------------------------+------------------------+----------------------+-------------+-------------------- + 15 | a | 4 | | 1 | t | f + 15 | b | 0 | 1 | | | + 15 | c | 4 | | | | + 15 | p | 1 | | | | + 15 | t | 2 | | | | +(5 rows) + +SELECT comp_hyper.schema_name|| '.' 
|| comp_hyper.table_name as "COMPRESSED_HYPER_NAME" +FROM _timescaledb_catalog.hypertable comp_hyper +INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id) +WHERE uncomp_hyper.table_name like 'foo' ORDER BY comp_hyper.id LIMIT 1 \gset +select add_retention_policy(:'COMPRESSED_HYPER_NAME', INTERVAL '4 months', true); +ERROR: cannot add retention policy to compressed hypertable "_compressed_hypertable_18" +HINT: Please add the policy to the corresponding uncompressed hypertable instead. +--Constraint checking for compression +create table fortable(col integer primary key); +create table table_constr( device_id integer, + timec integer , + location integer , + c integer constraint valid_cval check (c > 20) , + d integer, + primary key ( device_id, timec) +); +select table_name from create_hypertable('table_constr', 'timec', chunk_time_interval=> 10); + table_name +-------------- + table_constr +(1 row) + +BEGIN; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); +WARNING: column "device_id" should be used for segmenting or ordering +ROLLBACK; +alter table table_constr add constraint table_constr_uk unique (location, timec, device_id); +BEGIN; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id'); +WARNING: column "location" should be used for segmenting or ordering +ROLLBACK; +alter table table_constr add constraint table_constr_fk FOREIGN KEY(d) REFERENCES fortable(col) on delete cascade; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location'); +ERROR: column "d" must be used for segmenting +DETAIL: The foreign key constraint "table_constr_fk" cannot be enforced with the given compression configuration. +--exclusion constraints not allowed +alter table table_constr add constraint table_constr_exclu exclude using btree (timec with = ); +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location, d'); +ERROR: constraint table_constr_exclu is not supported for compression +HINT: Exclusion constraints are not supported on hypertables that are compressed. 
+alter table table_constr drop constraint table_constr_exclu ; +--now it works +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location, d'); +--can't add fks after compression enabled +alter table table_constr add constraint table_constr_fk_add_after FOREIGN KEY(d) REFERENCES fortable(col) on delete cascade; +ERROR: operation not supported on hypertables that have compression enabled +-- ddl ADD column variants that are not supported +ALTER TABLE table_constr ADD COLUMN newcol integer CHECK ( newcol < 10 ); +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer UNIQUE; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer PRIMARY KEY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer NOT NULL; +ERROR: cannot add column with NOT NULL constraint without default to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer DEFAULT random() + random(); +ERROR: cannot add column with non-constant default expression to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer REFERENCES fortable(col); +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer GENERATED ALWAYS AS IDENTITY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer GENERATED BY DEFAULT AS IDENTITY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol nonexistent_type; +ERROR: type "nonexistent_type" does not exist +LINE 1: ALTER TABLE table_constr ADD COLUMN newcol nonexistent_type; + ^ +--FK check should not error even with dropped columns (previously had a bug related to this) +CREATE TABLE table_fk ( + time timestamptz NOT NULL, + id1 int8 NOT NULL, + id2 int8 NOT NULL, + value float8 NULL, + CONSTRAINT fk1 FOREIGN KEY (id1) REFERENCES fortable(col), + CONSTRAINT fk2 FOREIGN KEY (id2) REFERENCES fortable(col) +); +SELECT create_hypertable('table_fk', 'time'); + create_hypertable +------------------------ + (23,public,table_fk,t) +(1 row) + +ALTER TABLE table_fk DROP COLUMN id1; +ALTER TABLE table_fk SET (timescaledb.compress,timescaledb.compress_segmentby = 'id2'); +-- TEST fk cascade delete behavior on compressed chunk -- +insert into fortable values(1); +insert into fortable values(10); +--we want 2 chunks here -- +insert into table_constr values(1000, 1, 44, 44, 1); +insert into table_constr values(1000, 10, 44, 44, 10); +select ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +where ch1.hypertable_id = ht.id and ht.table_name like 'table_constr' +ORDER BY ch1.id limit 1 \gset +-- we have 1 compressed and 1 uncompressed chunk after this. 
+select compress_chunk(:'CHUNK_NAME'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_19_7_chunk +(1 row) + +SELECT total_chunks , number_compressed_chunks +FROM hypertable_compression_stats('table_constr'); + total_chunks | number_compressed_chunks +--------------+-------------------------- + 2 | 1 +(1 row) + +--github issue 1661 +--disable compression after enabling it on a table that has fk constraints +CREATE TABLE table_constr2( device_id integer, + timec integer , + location integer , + d integer references fortable(col), + primary key ( device_id, timec) +); +SELECT table_name from create_hypertable('table_constr2', 'timec', chunk_time_interval=> 10); + table_name +--------------- + table_constr2 +(1 row) + +INSERT INTO fortable VALUES( 99 ); +INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); +ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); +ERROR: column "d" must be used for segmenting +DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. + ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +--compress a chunk and try to disable compression, it should fail -- +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id and ht.table_name like 'table_constr2' \gset +SELECT compress_chunk(:'CHUNK_NAME'); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_10_chunk +(1 row) + +ALTER TABLE table_constr2 set (timescaledb.compress=false); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +--decompress all chunks and disable compression. +SELECT decompress_chunk(:'CHUNK_NAME'); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_10_chunk +(1 row) + +ALTER TABLE table_constr2 SET (timescaledb.compress=false); +-- TEST compression policy +-- modify the config to trigger errors at runtime +CREATE TABLE test_table_int(time bigint, val int); +SELECT create_hypertable('test_table_int', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" +DETAIL: Dimensions cannot have NULL values. 
+ create_hypertable +------------------------------ + (27,public,test_table_int,t) +(1 row) + +CREATE OR REPLACE function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT'; +SELECT set_integer_now_func('test_table_int', 'dummy_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_int SELECT generate_series(1,5), 10; +ALTER TABLE test_table_int set (timescaledb.compress); +SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id +\gset +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_config.bgw_job +SET config = config - 'compress_after' +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +----------------------- + {"hypertable_id": 27} +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1000 config must have compress_after +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 35 at RAISE +SELECT remove_compression_policy('test_table_int'); + remove_compression_policy +--------------------------- + t +(1 row) + +--again add a new policy that we'll tamper with +SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id +\gset +UPDATE _timescaledb_config.bgw_job +SET config = config - 'hypertable_id' +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +----------------------- + {"compress_after": 2} +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1001 config must have hypertable_id +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 26 at RAISE +UPDATE _timescaledb_config.bgw_job +SET config = NULL +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +-------- + +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1001 has null config +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 21 at RAISE +-- test ADD COLUMN IF NOT EXISTS +CREATE TABLE metric (time TIMESTAMPTZ NOT NULL, val FLOAT8 NOT NULL, dev_id INT4 NOT NULL); +SELECT create_hypertable('metric', 'time', 'dev_id', 10); + create_hypertable +---------------------- + (29,public,metric,t) +(1 row) + +ALTER TABLE metric SET ( +timescaledb.compress, +timescaledb.compress_segmentby = 'dev_id', +timescaledb.compress_orderby = 'time DESC' +); +INSERT INTO metric(time, val, dev_id) +SELECT s.*, 3.14+1, 1 +FROM generate_series('2021-08-17 00:00:00'::timestamp, + '2021-08-17 00:02:00'::timestamp, '1 s'::interval) s; +SELECT compress_chunk(show_chunks('metric')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_29_17_chunk +(1 row) + +-- column does not exist the first time +ALTER TABLE metric ADD COLUMN IF NOT EXISTS "medium" VARCHAR ; +-- column already exists the second time +ALTER TABLE metric ADD COLUMN IF NOT EXISTS "medium" VARCHAR ; +NOTICE: column "medium" of relation "metric" already exists, skipping +-- also add one without IF NOT EXISTS +ALTER TABLE metric ADD COLUMN "medium_1" VARCHAR ; +ALTER TABLE metric ADD COLUMN "medium_1" VARCHAR ; +ERROR: column "medium_1" of relation "metric" already exists +--github issue 3481 +--GROUP BY error when setting compress_segmentby with an enum column +CREATE TYPE an_enum_type AS ENUM ('home', 'school'); +CREATE TABLE test ( + time timestamp NOT NULL, + enum_col an_enum_type NOT NULL +); +SELECT 
create_hypertable( + 'test', 'time' +); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +HINT: Use datatype TIMESTAMPTZ instead. + create_hypertable +-------------------- + (31,public,test,t) +(1 row) + +INSERT INTO test VALUES ('2001-01-01 00:00', 'home'), + ('2001-01-01 01:00', 'school'), + ('2001-01-01 02:00', 'home'); +--enable compression on enum_col +ALTER TABLE test SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'enum_col', + timescaledb.compress_orderby = 'time' +); +--below queries will pass before chunks are compressed +SELECT 1 FROM test GROUP BY enum_col; + ?column? +---------- + 1 + 1 +(2 rows) + +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +-------------------------------------------- + Unique + -> Result + -> Seq Scan on _hyper_31_19_chunk +(3 rows) + +--compress chunks +SELECT COMPRESS_CHUNK(X) FROM SHOW_CHUNKS('test') X; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_31_19_chunk +(1 row) + +ANALYZE test; +--below query should pass after chunks are compressed +SELECT 1 FROM test GROUP BY enum_col; + ?column? +---------- + 1 + 1 +(2 rows) + +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +----------------------------------------------------------------- + Unique + -> Result + -> Custom Scan (DecompressChunk) on _hyper_31_19_chunk + -> Seq Scan on compress_hyper_32_20_chunk +(4 rows) + +--github issue 4398 +SELECT format('CREATE TABLE data_table AS SELECT now() AS tm, %s', array_to_string(array_agg(format('125 AS c%s',a)), ', ')) FROM generate_series(1,550)a \gexec +CREATE TABLE data_table AS SELECT now() AS tm, 125 AS c1, 125 AS c2, 125 AS c3, 125 AS c4, 125 AS c5, 125 AS c6, 125 AS c7, 125 AS c8, 125 AS c9, 125 AS c10, 125 AS c11, 125 AS c12, 125 AS c13, 125 AS c14, 125 AS c15, 125 AS c16, 125 AS c17, 125 AS c18, 125 AS c19, 125 AS c20, 125 AS c21, 125 AS c22, 125 AS c23, 125 AS c24, 125 AS c25, 125 AS c26, 125 AS c27, 125 AS c28, 125 AS c29, 125 AS c30, 125 AS c31, 125 AS c32, 125 AS c33, 125 AS c34, 125 AS c35, 125 AS c36, 125 AS c37, 125 AS c38, 125 AS c39, 125 AS c40, 125 AS c41, 125 AS c42, 125 AS c43, 125 AS c44, 125 AS c45, 125 AS c46, 125 AS c47, 125 AS c48, 125 AS c49, 125 AS c50, 125 AS c51, 125 AS c52, 125 AS c53, 125 AS c54, 125 AS c55, 125 AS c56, 125 AS c57, 125 AS c58, 125 AS c59, 125 AS c60, 125 AS c61, 125 AS c62, 125 AS c63, 125 AS c64, 125 AS c65, 125 AS c66, 125 AS c67, 125 AS c68, 125 AS c69, 125 AS c70, 125 AS c71, 125 AS c72, 125 AS c73, 125 AS c74, 125 AS c75, 125 AS c76, 125 AS c77, 125 AS c78, 125 AS c79, 125 AS c80, 125 AS c81, 125 AS c82, 125 AS c83, 125 AS c84, 125 AS c85, 125 AS c86, 125 AS c87, 125 AS c88, 125 AS c89, 125 AS c90, 125 AS c91, 125 AS c92, 125 AS c93, 125 AS c94, 125 AS c95, 125 AS c96, 125 AS c97, 125 AS c98, 125 AS c99, 125 AS c100, 125 AS c101, 125 AS c102, 125 AS c103, 125 AS c104, 125 AS c105, 125 AS c106, 125 AS c107, 125 AS c108, 125 AS c109, 125 AS c110, 125 AS c111, 125 AS c112, 125 AS c113, 125 AS c114, 125 AS c115, 125 AS c116, 125 AS c117, 125 AS c118, 125 AS c119, 125 AS c120, 125 AS c121, 125 AS c122, 125 AS c123, 125 AS c124, 125 AS c125, 125 AS c126, 125 AS c127, 125 AS c128, 125 AS c129, 125 AS c130, 125 AS c131, 125 AS c132, 125 AS c133, 125 AS c134, 125 AS c135, 125 AS c136, 125 AS c137, 125 AS c138, 125 AS c139, 125 AS c140, 125 AS c141, 125 AS c142, 125 AS c143, 125 AS c144, 125 AS c145, 125 AS c146, 125 AS c147, 
125 AS c148, 125 AS c149, 125 AS c150, 125 AS c151, 125 AS c152, 125 AS c153, 125 AS c154, 125 AS c155, 125 AS c156, 125 AS c157, 125 AS c158, 125 AS c159, 125 AS c160, 125 AS c161, 125 AS c162, 125 AS c163, 125 AS c164, 125 AS c165, 125 AS c166, 125 AS c167, 125 AS c168, 125 AS c169, 125 AS c170, 125 AS c171, 125 AS c172, 125 AS c173, 125 AS c174, 125 AS c175, 125 AS c176, 125 AS c177, 125 AS c178, 125 AS c179, 125 AS c180, 125 AS c181, 125 AS c182, 125 AS c183, 125 AS c184, 125 AS c185, 125 AS c186, 125 AS c187, 125 AS c188, 125 AS c189, 125 AS c190, 125 AS c191, 125 AS c192, 125 AS c193, 125 AS c194, 125 AS c195, 125 AS c196, 125 AS c197, 125 AS c198, 125 AS c199, 125 AS c200, 125 AS c201, 125 AS c202, 125 AS c203, 125 AS c204, 125 AS c205, 125 AS c206, 125 AS c207, 125 AS c208, 125 AS c209, 125 AS c210, 125 AS c211, 125 AS c212, 125 AS c213, 125 AS c214, 125 AS c215, 125 AS c216, 125 AS c217, 125 AS c218, 125 AS c219, 125 AS c220, 125 AS c221, 125 AS c222, 125 AS c223, 125 AS c224, 125 AS c225, 125 AS c226, 125 AS c227, 125 AS c228, 125 AS c229, 125 AS c230, 125 AS c231, 125 AS c232, 125 AS c233, 125 AS c234, 125 AS c235, 125 AS c236, 125 AS c237, 125 AS c238, 125 AS c239, 125 AS c240, 125 AS c241, 125 AS c242, 125 AS c243, 125 AS c244, 125 AS c245, 125 AS c246, 125 AS c247, 125 AS c248, 125 AS c249, 125 AS c250, 125 AS c251, 125 AS c252, 125 AS c253, 125 AS c254, 125 AS c255, 125 AS c256, 125 AS c257, 125 AS c258, 125 AS c259, 125 AS c260, 125 AS c261, 125 AS c262, 125 AS c263, 125 AS c264, 125 AS c265, 125 AS c266, 125 AS c267, 125 AS c268, 125 AS c269, 125 AS c270, 125 AS c271, 125 AS c272, 125 AS c273, 125 AS c274, 125 AS c275, 125 AS c276, 125 AS c277, 125 AS c278, 125 AS c279, 125 AS c280, 125 AS c281, 125 AS c282, 125 AS c283, 125 AS c284, 125 AS c285, 125 AS c286, 125 AS c287, 125 AS c288, 125 AS c289, 125 AS c290, 125 AS c291, 125 AS c292, 125 AS c293, 125 AS c294, 125 AS c295, 125 AS c296, 125 AS c297, 125 AS c298, 125 AS c299, 125 AS c300, 125 AS c301, 125 AS c302, 125 AS c303, 125 AS c304, 125 AS c305, 125 AS c306, 125 AS c307, 125 AS c308, 125 AS c309, 125 AS c310, 125 AS c311, 125 AS c312, 125 AS c313, 125 AS c314, 125 AS c315, 125 AS c316, 125 AS c317, 125 AS c318, 125 AS c319, 125 AS c320, 125 AS c321, 125 AS c322, 125 AS c323, 125 AS c324, 125 AS c325, 125 AS c326, 125 AS c327, 125 AS c328, 125 AS c329, 125 AS c330, 125 AS c331, 125 AS c332, 125 AS c333, 125 AS c334, 125 AS c335, 125 AS c336, 125 AS c337, 125 AS c338, 125 AS c339, 125 AS c340, 125 AS c341, 125 AS c342, 125 AS c343, 125 AS c344, 125 AS c345, 125 AS c346, 125 AS c347, 125 AS c348, 125 AS c349, 125 AS c350, 125 AS c351, 125 AS c352, 125 AS c353, 125 AS c354, 125 AS c355, 125 AS c356, 125 AS c357, 125 AS c358, 125 AS c359, 125 AS c360, 125 AS c361, 125 AS c362, 125 AS c363, 125 AS c364, 125 AS c365, 125 AS c366, 125 AS c367, 125 AS c368, 125 AS c369, 125 AS c370, 125 AS c371, 125 AS c372, 125 AS c373, 125 AS c374, 125 AS c375, 125 AS c376, 125 AS c377, 125 AS c378, 125 AS c379, 125 AS c380, 125 AS c381, 125 AS c382, 125 AS c383, 125 AS c384, 125 AS c385, 125 AS c386, 125 AS c387, 125 AS c388, 125 AS c389, 125 AS c390, 125 AS c391, 125 AS c392, 125 AS c393, 125 AS c394, 125 AS c395, 125 AS c396, 125 AS c397, 125 AS c398, 125 AS c399, 125 AS c400, 125 AS c401, 125 AS c402, 125 AS c403, 125 AS c404, 125 AS c405, 125 AS c406, 125 AS c407, 125 AS c408, 125 AS c409, 125 AS c410, 125 AS c411, 125 AS c412, 125 AS c413, 125 AS c414, 125 AS c415, 125 AS c416, 125 AS c417, 125 AS c418, 125 AS c419, 125 AS c420, 125 
AS c421, 125 AS c422, 125 AS c423, 125 AS c424, 125 AS c425, 125 AS c426, 125 AS c427, 125 AS c428, 125 AS c429, 125 AS c430, 125 AS c431, 125 AS c432, 125 AS c433, 125 AS c434, 125 AS c435, 125 AS c436, 125 AS c437, 125 AS c438, 125 AS c439, 125 AS c440, 125 AS c441, 125 AS c442, 125 AS c443, 125 AS c444, 125 AS c445, 125 AS c446, 125 AS c447, 125 AS c448, 125 AS c449, 125 AS c450, 125 AS c451, 125 AS c452, 125 AS c453, 125 AS c454, 125 AS c455, 125 AS c456, 125 AS c457, 125 AS c458, 125 AS c459, 125 AS c460, 125 AS c461, 125 AS c462, 125 AS c463, 125 AS c464, 125 AS c465, 125 AS c466, 125 AS c467, 125 AS c468, 125 AS c469, 125 AS c470, 125 AS c471, 125 AS c472, 125 AS c473, 125 AS c474, 125 AS c475, 125 AS c476, 125 AS c477, 125 AS c478, 125 AS c479, 125 AS c480, 125 AS c481, 125 AS c482, 125 AS c483, 125 AS c484, 125 AS c485, 125 AS c486, 125 AS c487, 125 AS c488, 125 AS c489, 125 AS c490, 125 AS c491, 125 AS c492, 125 AS c493, 125 AS c494, 125 AS c495, 125 AS c496, 125 AS c497, 125 AS c498, 125 AS c499, 125 AS c500, 125 AS c501, 125 AS c502, 125 AS c503, 125 AS c504, 125 AS c505, 125 AS c506, 125 AS c507, 125 AS c508, 125 AS c509, 125 AS c510, 125 AS c511, 125 AS c512, 125 AS c513, 125 AS c514, 125 AS c515, 125 AS c516, 125 AS c517, 125 AS c518, 125 AS c519, 125 AS c520, 125 AS c521, 125 AS c522, 125 AS c523, 125 AS c524, 125 AS c525, 125 AS c526, 125 AS c527, 125 AS c528, 125 AS c529, 125 AS c530, 125 AS c531, 125 AS c532, 125 AS c533, 125 AS c534, 125 AS c535, 125 AS c536, 125 AS c537, 125 AS c538, 125 AS c539, 125 AS c540, 125 AS c541, 125 AS c542, 125 AS c543, 125 AS c544, 125 AS c545, 125 AS c546, 125 AS c547, 125 AS c548, 125 AS c549, 125 AS c550 +CREATE TABLE ts_table (LIKE data_table); +SELECT * FROM create_hypertable('ts_table', 'tm'); +NOTICE: adding not-null constraint to column "tm" +DETAIL: Dimensions cannot have NULL values. 
+ hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 33 | public | ts_table | t +(1 row) + +--should report a warning +\set VERBOSITY terse +ALTER TABLE ts_table SET(timescaledb.compress, timescaledb.compress_segmentby = 'c1', + timescaledb.compress_orderby = 'tm'); +WARNING: compressed row size might exceed maximum row size +INSERT INTO ts_table SELECT * FROM data_table; +--cleanup tables +DROP TABLE data_table cascade; +DROP TABLE ts_table cascade; +-- #5458 invalid reads for row expressions after column dropped on compressed tables +CREATE TABLE readings( + "time" TIMESTAMPTZ NOT NULL, + battery_status TEXT, + battery_temperature DOUBLE PRECISION +); +INSERT INTO readings ("time") VALUES ('2022-11-11 11:11:11-00'); +SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour', migrate_data=>true); +NOTICE: migrating data to chunks + create_hypertable +------------------------ + (35,public,readings,t) +(1 row) + +create unique index readings_uniq_idx on readings("time",battery_temperature); +ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature'); +SELECT compress_chunk(show_chunks('readings')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk +(1 row) + +ALTER TABLE readings DROP COLUMN battery_status; +INSERT INTO readings ("time", battery_temperature) VALUES ('2022-11-11 11:11:11', 0.2); +SELECT readings FROM readings; + readings +-------------------------------------- + ("Fri Nov 11 03:11:11 2022 PST",) + ("Fri Nov 11 11:11:11 2022 PST",0.2) +(2 rows) + +-- #5577 On-insert decompression after schema changes may not work properly +SELECT decompress_chunk(show_chunks('readings'),true); +NOTICE: chunk "_hyper_35_24_chunk" is not compressed + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + +(2 rows) + +SELECT compress_chunk(show_chunks('readings'),true); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + _timescaledb_internal._hyper_35_24_chunk +(2 rows) + +\set ON_ERROR_STOP 0 +INSERT INTO readings ("time", battery_temperature) VALUES + ('2022-11-11 11:11:11',0.2) -- same record as inserted +; +ERROR: duplicate key value violates unique constraint "_hyper_35_24_chunk_readings_uniq_idx" +\set ON_ERROR_STOP 1 +SELECT * from readings; + time | battery_temperature +------------------------------+--------------------- + Fri Nov 11 03:11:11 2022 PST | + Fri Nov 11 11:11:11 2022 PST | 0.2 +(2 rows) + +SELECT assert_equal(count(1), 2::bigint) FROM readings; + assert_equal +-------------- + +(1 row) + +-- no unique check failure during decompression +SELECT decompress_chunk(show_chunks('readings'),true); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + _timescaledb_internal._hyper_35_24_chunk +(2 rows) + +-- #5553 Unique constraints are not always respected on compressed tables +CREATE TABLE main_table AS +SELECT '2011-11-11 11:11:11'::timestamptz AS time, 'foo' AS device_id; +CREATE UNIQUE INDEX xm ON main_table(time, device_id); +SELECT create_hypertable('main_table', 'time', chunk_time_interval => interval '12 hour', migrate_data => TRUE); +NOTICE: adding not-null constraint to column "time" +NOTICE: migrating data to chunks + create_hypertable +-------------------------- + (37,public,main_table,t) +(1 row) + +ALTER TABLE main_table SET ( + 
timescaledb.compress, + timescaledb.compress_segmentby = 'device_id', + timescaledb.compress_orderby = ''); +SELECT compress_chunk(show_chunks('main_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_27_chunk +(1 row) + +-- insert rejected +\set ON_ERROR_STOP 0 +INSERT INTO main_table VALUES + ('2011-11-11 11:11:11', 'foo'); +ERROR: duplicate key value violates unique constraint "_hyper_37_27_chunk_xm" +-- insert rejected in case 1st row doesn't violate constraint with different segmentby +INSERT INTO main_table VALUES + ('2011-11-11 11:12:11', 'bar'), + ('2011-11-11 11:11:11', 'foo'); +ERROR: duplicate key value violates unique constraint "_hyper_37_27_chunk_xm" +\set ON_ERROR_STOP 1 +SELECT assert_equal(count(1), 1::bigint) FROM main_table; + assert_equal +-------------- + +(1 row) + +-- no unique check failure during decompression +SELECT decompress_chunk(show_chunks('main_table'), TRUE); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_27_chunk +(1 row) + +DROP TABLE IF EXISTS readings; +CREATE TABLE readings( + "time" timestamptz NOT NULL, + battery_status text, + candy integer, + battery_status2 text, + battery_temperature text +); +SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour'); + create_hypertable +------------------------ + (39,public,readings,t) +(1 row) + +CREATE UNIQUE INDEX readings_uniq_idx ON readings("time", battery_temperature); +ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'battery_temperature'); +ALTER TABLE readings DROP COLUMN battery_status; +ALTER TABLE readings DROP COLUMN battery_status2; +INSERT INTO readings("time", candy, battery_temperature) + VALUES ('2022-11-11 11:11:11', 88, '0.2'); +SELECT compress_chunk(show_chunks('readings'), TRUE); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_39_29_chunk +(1 row) + +-- no error happens +INSERT INTO readings("time", candy, battery_temperature) + VALUES ('2022-11-11 11:11:11', 33, 0.3) +; +-- Segmentby checks should be done for unique indexes without +-- constraints, so create a table without constraints and add a unique +-- index and try to create a table without using the right segmentby +-- column. 
+CREATE TABLE table_unique_index( + location smallint not null, + device_id smallint not null, + time timestamptz not null, + value float8 not null +); +CREATE UNIQUE index ON table_unique_index(location, device_id, time); +SELECT table_name FROM create_hypertable('table_unique_index', 'time'); + table_name +-------------------- + table_unique_index +(1 row) + +-- Will warn because the lack of segmentby/orderby compression options +ALTER TABLE table_unique_index SET (timescaledb.compress); +WARNING: column "location" should be used for segmenting or ordering +WARNING: column "device_id" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); +WARNING: column "device_id" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id'); +WARNING: column "location" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +-- Will enable compression without warnings +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location,device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); diff --git a/tsl/test/expected/compression_errors-16.out b/tsl/test/expected/compression_errors-16.out new file mode 100644 index 00000000000..df79cf6b406 --- /dev/null +++ b/tsl/test/expected/compression_errors-16.out @@ -0,0 +1,829 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +\set VERBOSITY default +\set ECHO none +--table with special column names -- +create table foo2 (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('foo2', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------ + foo2 +(1 row) + +create table foo3 (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('foo3', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ table_name +------------ + foo3 +(1 row) + +create table non_compressed (a integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('non_compressed', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +---------------- + non_compressed +(1 row) + +insert into non_compressed values( 3 , 16 , 20, 4); +ALTER TABLE foo2 set (timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); +ERROR: the option timescaledb.compress must be set to true to enable compression +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); +ERROR: cannot use column "c" for both ordering and segmenting +HINT: Use separate columns for the timescaledb.compress_orderby and timescaledb.compress_segmentby options. +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd DESC'); +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd'); +-- this is acceptable: having previously set the default value for orderby +-- and skipping orderby on a subsequent alter command +create table default_skipped (a integer not null, b integer, c integer, d integer); +select create_hypertable('default_skipped', 'a', chunk_time_interval=> 10); + create_hypertable +------------------------------ + (6,public,default_skipped,t) +(1 row) + +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +create table with_rls (a integer, b integer); +ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; +select table_name from create_hypertable('with_rls', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ table_name +------------ + with_rls +(1 row) + +ALTER TABLE with_rls set (timescaledb.compress, timescaledb.compress_orderby='a'); +ERROR: compression cannot be used on table with row security +--note that the time column "a" should be added to the end of the orderby list +select * from _timescaledb_catalog.hypertable_compression order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+----------+--------------------------+------------------------+----------------------+-------------+-------------------- + 1 | a | 4 | | 2 | f | t + 6 | a | 4 | | 1 | f | t + 6 | b | 4 | | | | + 1 | bacB toD | 0 | 1 | | | + 1 | c | 0 | 2 | | | + 6 | c | 0 | 1 | | | + 1 | d | 4 | | 1 | t | f + 6 | d | 4 | | | | +(8 rows) + +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d DeSc NullS lAsT'); +--shold allow alter since segment by was empty +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d Asc NullS lAsT'); +--this is ok too +ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c', timescaledb.compress_orderby = 'd DeSc NullS lAsT'); +-- Negative test cases --- +ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c'); +ERROR: must specify a column to order by +DETAIL: The timescaledb.compress_orderby option was previously set and must also be specified in the updated configuration. +alter table default_skipped set (timescaledb.compress, timescaledb.compress_orderby = 'a asc', timescaledb.compress_segmentby = 'c'); +alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +ERROR: must specify a column to order by +DETAIL: The timescaledb.compress_orderby option was previously set and must also be specified in the updated configuration. +create table reserved_column_prefix (a integer, _ts_meta_foo integer, "bacB toD" integer, c integer, d integer); +select table_name from create_hypertable('reserved_column_prefix', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. + table_name +------------------------ + reserved_column_prefix +(1 row) + +ALTER TABLE reserved_column_prefix set (timescaledb.compress); +ERROR: cannot compress tables with reserved column prefix '_ts_meta_' +--basic test with count +create table foo (a integer, b integer, c integer, t text, p point); +ALTER TABLE foo ADD CONSTRAINT chk_existing CHECK(b > 0); +select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" +DETAIL: Dimensions cannot have NULL values. 
+ table_name +------------ + foo +(1 row) + +insert into foo values( 3 , 16 , 20); +insert into foo values( 10 , 10 , 20); +insert into foo values( 20 , 11 , 20); +insert into foo values( 30 , 12 , 20); +-- should error out -- +ALTER TABLE foo ALTER b SET NOT NULL, set (timescaledb.compress); +ERROR: ALTER TABLE SET does not support multiple clauses +ALTER TABLE foo ALTER b SET NOT NULL; +select attname, attnotnull from pg_attribute where attrelid = (select oid from pg_class where relname like 'foo') and attname like 'b'; + attname | attnotnull +---------+------------ + b | t +(1 row) + +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); +ERROR: column "d" does not exist +HINT: The timescaledb.compress_segmentby option must reference a valid column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'd'); +ERROR: column "d" does not exist +HINT: The timescaledb.compress_orderby option must reference a valid column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls'); +ERROR: unable to parse ordering option "c desc nulls" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls thirsty'); +ERROR: unable to parse ordering option "c desc nulls thirsty" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c climb nulls first'); +ERROR: unable to parse ordering option "c climb nulls first" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c nulls first asC'); +ERROR: unable to parse ordering option "c nulls first asC" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls first asc'); +ERROR: unable to parse ordering option "c desc nulls first asc" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc hurry'); +ERROR: unable to parse ordering option "c desc hurry" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); +ERROR: unable to parse ordering option "c descend" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c; SELECT 1'); +ERROR: unable to parse ordering option "c; SELECT 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
+ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = '1,2'); +ERROR: unable to parse ordering option "1,2" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c + 1'); +ERROR: unable to parse ordering option "c + 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'random()'); +ERROR: unable to parse ordering option "random()" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c LIMIT 1'); +ERROR: unable to parse ordering option "c LIMIT 1" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c USING <'); +ERROR: unable to parse ordering option "c USING <" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 't COLLATE "en_US"'); +ERROR: unable to parse ordering option "t COLLATE "en_US"" +HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); +ERROR: unable to parse segmenting option "c asc" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c nulls last'); +ERROR: unable to parse segmenting option "c nulls last" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + 1'); +ERROR: unable to parse segmenting option "c + 1" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'random()'); +ERROR: unable to parse segmenting option "random()" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c LIMIT 1'); +ERROR: unable to parse segmenting option "c LIMIT 1" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + b'); +ERROR: unable to parse segmenting option "c + b" +HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p'); +ERROR: invalid ordering column type point +DETAIL: Could not identify a less-than operator for the type. 
+ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b'); +ERROR: duplicate column name "b" +HINT: The timescaledb.compress_segmentby option must reference distinct column. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b'); +ERROR: duplicate column name "b" +HINT: The timescaledb.compress_orderby option must reference distinct column. +--should succeed +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b'); +--ddl on ht with compression +ALTER TABLE foo DROP COLUMN a; +ERROR: cannot drop column named in partition key +DETAIL: Cannot drop column that is a hypertable partitioning (space or time) dimension. +ALTER TABLE foo DROP COLUMN b; +ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled +ALTER TABLE foo ALTER COLUMN t SET NOT NULL; +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo RESET (timescaledb.compress); +ERROR: compression options cannot be reset +ALTER TABLE foo ADD CONSTRAINT chk CHECK(b > 0); +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo ADD CONSTRAINT chk UNIQUE(b); +ERROR: operation not supported on hypertables that have compression enabled +ALTER TABLE foo DROP CONSTRAINT chk_existing; +ERROR: operation not supported on hypertables that have compression enabled +--note that the time column "a" should not be added to the end of the order by list again (should appear first) +select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _timescaledb_catalog.hypertable h on (h.id = hc.hypertable_id) where h.table_name = 'foo' order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+---------+--------------------------+------------------------+----------------------+-------------+-------------------- + 15 | a | 4 | | 1 | t | f + 15 | b | 4 | | 2 | t | f + 15 | c | 4 | | | | + 15 | p | 1 | | | | + 15 | t | 2 | | | | +(5 rows) + +select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1; +ERROR: chunk "_hyper_15_2_chunk" is not compressed +--test changing the segment by columns +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b'); +select ch1.schema_name|| '.' 
|| ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1 \gset +select decompress_chunk(:'CHUNK_NAME'); +ERROR: chunk "_hyper_15_2_chunk" is not compressed +select decompress_chunk(:'CHUNK_NAME', if_compressed=>true); +NOTICE: chunk "_hyper_15_2_chunk" is not compressed + decompress_chunk +------------------ + +(1 row) + +--should succeed +select compress_chunk(:'CHUNK_NAME'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +select compress_chunk(:'CHUNK_NAME'); +ERROR: chunk "_hyper_15_2_chunk" is already compressed +select compress_chunk(:'CHUNK_NAME', if_not_compressed=>true); +NOTICE: chunk "_hyper_15_2_chunk" is already compressed + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +select compress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' ORDER BY ch1.id limit 1; +ERROR: compression not enabled on "non_compressed" +DETAIL: It is not possible to compress chunks on a hypertable or continuous aggregate that does not have compression enabled. +HINT: Enable compression using ALTER TABLE/MATERIALIZED VIEW with the timescaledb.compress option. +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'c'); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +ALTER TABLE foo set (timescaledb.compress='f'); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +ALTER TABLE foo reset (timescaledb.compress); +ERROR: compression options cannot be reset +select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' ORDER BY ch1.id limit 1; +ERROR: missing compressed hypertable +--should succeed +select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' and ch1.compressed_chunk_id IS NOT NULL; + decompress_chunk +----------------------------------------- + _timescaledb_internal._hyper_15_2_chunk +(1 row) + +--should succeed +ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b'); +select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _timescaledb_catalog.hypertable h on (h.id = hc.hypertable_id) where h.table_name = 'foo' order by attname; + hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst +---------------+---------+--------------------------+------------------------+----------------------+-------------+-------------------- + 15 | a | 4 | | 1 | t | f + 15 | b | 0 | 1 | | | + 15 | c | 4 | | | | + 15 | p | 1 | | | | + 15 | t | 2 | | | | +(5 rows) + +SELECT comp_hyper.schema_name|| '.' 
|| comp_hyper.table_name as "COMPRESSED_HYPER_NAME" +FROM _timescaledb_catalog.hypertable comp_hyper +INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id) +WHERE uncomp_hyper.table_name like 'foo' ORDER BY comp_hyper.id LIMIT 1 \gset +select add_retention_policy(:'COMPRESSED_HYPER_NAME', INTERVAL '4 months', true); +ERROR: cannot add retention policy to compressed hypertable "_compressed_hypertable_18" +HINT: Please add the policy to the corresponding uncompressed hypertable instead. +--Constraint checking for compression +create table fortable(col integer primary key); +create table table_constr( device_id integer, + timec integer , + location integer , + c integer constraint valid_cval check (c > 20) , + d integer, + primary key ( device_id, timec) +); +select table_name from create_hypertable('table_constr', 'timec', chunk_time_interval=> 10); + table_name +-------------- + table_constr +(1 row) + +BEGIN; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); +WARNING: column "device_id" should be used for segmenting or ordering +ROLLBACK; +alter table table_constr add constraint table_constr_uk unique (location, timec, device_id); +BEGIN; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id'); +WARNING: column "location" should be used for segmenting or ordering +ROLLBACK; +alter table table_constr add constraint table_constr_fk FOREIGN KEY(d) REFERENCES fortable(col) on delete cascade; +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location'); +ERROR: column "d" must be used for segmenting +DETAIL: The foreign key constraint "table_constr_fk" cannot be enforced with the given compression configuration. +--exclusion constraints not allowed +alter table table_constr add constraint table_constr_exclu exclude using btree (timec with = ); +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location, d'); +ERROR: constraint table_constr_exclu is not supported for compression +HINT: Exclusion constraints are not supported on hypertables that are compressed. 
+alter table table_constr drop constraint table_constr_exclu ; +--now it works +ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location, d'); +--can't add fks after compression enabled +alter table table_constr add constraint table_constr_fk_add_after FOREIGN KEY(d) REFERENCES fortable(col) on delete cascade; +ERROR: operation not supported on hypertables that have compression enabled +-- ddl ADD column variants that are not supported +ALTER TABLE table_constr ADD COLUMN newcol integer CHECK ( newcol < 10 ); +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer UNIQUE; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer PRIMARY KEY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer NOT NULL; +ERROR: cannot add column with NOT NULL constraint without default to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol integer DEFAULT random() + random(); +ERROR: cannot add column with non-constant default expression to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer REFERENCES fortable(col); +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer GENERATED ALWAYS AS IDENTITY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN IF NOT EXISTS newcol integer GENERATED BY DEFAULT AS IDENTITY; +ERROR: cannot add column with constraints to a hypertable that has compression enabled +ALTER TABLE table_constr ADD COLUMN newcol nonexistent_type; +ERROR: type "nonexistent_type" does not exist +LINE 1: ALTER TABLE table_constr ADD COLUMN newcol nonexistent_type; + ^ +--FK check should not error even with dropped columns (previously had a bug related to this) +CREATE TABLE table_fk ( + time timestamptz NOT NULL, + id1 int8 NOT NULL, + id2 int8 NOT NULL, + value float8 NULL, + CONSTRAINT fk1 FOREIGN KEY (id1) REFERENCES fortable(col), + CONSTRAINT fk2 FOREIGN KEY (id2) REFERENCES fortable(col) +); +SELECT create_hypertable('table_fk', 'time'); + create_hypertable +------------------------ + (23,public,table_fk,t) +(1 row) + +ALTER TABLE table_fk DROP COLUMN id1; +ALTER TABLE table_fk SET (timescaledb.compress,timescaledb.compress_segmentby = 'id2'); +-- TEST fk cascade delete behavior on compressed chunk -- +insert into fortable values(1); +insert into fortable values(10); +--we want 2 chunks here -- +insert into table_constr values(1000, 1, 44, 44, 1); +insert into table_constr values(1000, 10, 44, 44, 10); +select ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +where ch1.hypertable_id = ht.id and ht.table_name like 'table_constr' +ORDER BY ch1.id limit 1 \gset +-- we have 1 compressed and 1 uncompressed chunk after this. 
+select compress_chunk(:'CHUNK_NAME'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_19_7_chunk +(1 row) + +SELECT total_chunks , number_compressed_chunks +FROM hypertable_compression_stats('table_constr'); + total_chunks | number_compressed_chunks +--------------+-------------------------- + 2 | 1 +(1 row) + +--github issue 1661 +--disable compression after enabling it on a table that has fk constraints +CREATE TABLE table_constr2( device_id integer, + timec integer , + location integer , + d integer references fortable(col), + primary key ( device_id, timec) +); +SELECT table_name from create_hypertable('table_constr2', 'timec', chunk_time_interval=> 10); + table_name +--------------- + table_constr2 +(1 row) + +INSERT INTO fortable VALUES( 99 ); +INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); +ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); +ERROR: column "d" must be used for segmenting +DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. + ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +--compress a chunk and try to disable compression, it should fail -- +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id and ht.table_name like 'table_constr2' \gset +SELECT compress_chunk(:'CHUNK_NAME'); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_10_chunk +(1 row) + +ALTER TABLE table_constr2 set (timescaledb.compress=false); +ERROR: cannot change configuration on already compressed chunks +DETAIL: There are compressed chunks that prevent changing the existing compression configuration. +--decompress all chunks and disable compression. +SELECT decompress_chunk(:'CHUNK_NAME'); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_10_chunk +(1 row) + +ALTER TABLE table_constr2 SET (timescaledb.compress=false); +-- TEST compression policy +-- modify the config to trigger errors at runtime +CREATE TABLE test_table_int(time bigint, val int); +SELECT create_hypertable('test_table_int', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" +DETAIL: Dimensions cannot have NULL values. 
+ create_hypertable +------------------------------ + (27,public,test_table_int,t) +(1 row) + +CREATE OR REPLACE function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT'; +SELECT set_integer_now_func('test_table_int', 'dummy_now'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_int SELECT generate_series(1,5), 10; +ALTER TABLE test_table_int set (timescaledb.compress); +SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id +\gset +\c :TEST_DBNAME :ROLE_SUPERUSER +UPDATE _timescaledb_config.bgw_job +SET config = config - 'compress_after' +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +----------------------- + {"hypertable_id": 27} +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1000 config must have compress_after +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 35 at RAISE +SELECT remove_compression_policy('test_table_int'); + remove_compression_policy +--------------------------- + t +(1 row) + +--again add a new policy that we'll tamper with +SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id +\gset +UPDATE _timescaledb_config.bgw_job +SET config = config - 'hypertable_id' +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +----------------------- + {"compress_after": 2} +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1001 config must have hypertable_id +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 26 at RAISE +UPDATE _timescaledb_config.bgw_job +SET config = NULL +WHERE id = :compressjob_id; +SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + config +-------- + +(1 row) + +--should fail +CALL run_job(:compressjob_id); +ERROR: job 1001 has null config +CONTEXT: PL/pgSQL function _timescaledb_functions.policy_compression(integer,jsonb) line 21 at RAISE +-- test ADD COLUMN IF NOT EXISTS +CREATE TABLE metric (time TIMESTAMPTZ NOT NULL, val FLOAT8 NOT NULL, dev_id INT4 NOT NULL); +SELECT create_hypertable('metric', 'time', 'dev_id', 10); + create_hypertable +---------------------- + (29,public,metric,t) +(1 row) + +ALTER TABLE metric SET ( +timescaledb.compress, +timescaledb.compress_segmentby = 'dev_id', +timescaledb.compress_orderby = 'time DESC' +); +INSERT INTO metric(time, val, dev_id) +SELECT s.*, 3.14+1, 1 +FROM generate_series('2021-08-17 00:00:00'::timestamp, + '2021-08-17 00:02:00'::timestamp, '1 s'::interval) s; +SELECT compress_chunk(show_chunks('metric')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_29_17_chunk +(1 row) + +-- column does not exist the first time +ALTER TABLE metric ADD COLUMN IF NOT EXISTS "medium" VARCHAR ; +-- column already exists the second time +ALTER TABLE metric ADD COLUMN IF NOT EXISTS "medium" VARCHAR ; +NOTICE: column "medium" of relation "metric" already exists, skipping +-- also add one without IF NOT EXISTS +ALTER TABLE metric ADD COLUMN "medium_1" VARCHAR ; +ALTER TABLE metric ADD COLUMN "medium_1" VARCHAR ; +ERROR: column "medium_1" of relation "metric" already exists +--github issue 3481 +--GROUP BY error when setting compress_segmentby with an enum column +CREATE TYPE an_enum_type AS ENUM ('home', 'school'); +CREATE TABLE test ( + time timestamp NOT NULL, + enum_col an_enum_type NOT NULL +); +SELECT 
create_hypertable( + 'test', 'time' +); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +HINT: Use datatype TIMESTAMPTZ instead. + create_hypertable +-------------------- + (31,public,test,t) +(1 row) + +INSERT INTO test VALUES ('2001-01-01 00:00', 'home'), + ('2001-01-01 01:00', 'school'), + ('2001-01-01 02:00', 'home'); +--enable compression on enum_col +ALTER TABLE test SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'enum_col', + timescaledb.compress_orderby = 'time' +); +--below queries will pass before chunks are compressed +SELECT 1 FROM test GROUP BY enum_col; + ?column? +---------- + 1 + 1 +(2 rows) + +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +-------------------------------------------- + Limit + -> Result + -> Seq Scan on _hyper_31_19_chunk +(3 rows) + +--compress chunks +SELECT COMPRESS_CHUNK(X) FROM SHOW_CHUNKS('test') X; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_31_19_chunk +(1 row) + +ANALYZE test; +--below query should pass after chunks are compressed +SELECT 1 FROM test GROUP BY enum_col; + ?column? +---------- + 1 + 1 +(2 rows) + +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; + QUERY PLAN +----------------------------------------------------------------- + Limit + -> Result + -> Custom Scan (DecompressChunk) on _hyper_31_19_chunk + -> Seq Scan on compress_hyper_32_20_chunk +(4 rows) + +--github issue 4398 +SELECT format('CREATE TABLE data_table AS SELECT now() AS tm, %s', array_to_string(array_agg(format('125 AS c%s',a)), ', ')) FROM generate_series(1,550)a \gexec +CREATE TABLE data_table AS SELECT now() AS tm, 125 AS c1, 125 AS c2, 125 AS c3, 125 AS c4, 125 AS c5, 125 AS c6, 125 AS c7, 125 AS c8, 125 AS c9, 125 AS c10, 125 AS c11, 125 AS c12, 125 AS c13, 125 AS c14, 125 AS c15, 125 AS c16, 125 AS c17, 125 AS c18, 125 AS c19, 125 AS c20, 125 AS c21, 125 AS c22, 125 AS c23, 125 AS c24, 125 AS c25, 125 AS c26, 125 AS c27, 125 AS c28, 125 AS c29, 125 AS c30, 125 AS c31, 125 AS c32, 125 AS c33, 125 AS c34, 125 AS c35, 125 AS c36, 125 AS c37, 125 AS c38, 125 AS c39, 125 AS c40, 125 AS c41, 125 AS c42, 125 AS c43, 125 AS c44, 125 AS c45, 125 AS c46, 125 AS c47, 125 AS c48, 125 AS c49, 125 AS c50, 125 AS c51, 125 AS c52, 125 AS c53, 125 AS c54, 125 AS c55, 125 AS c56, 125 AS c57, 125 AS c58, 125 AS c59, 125 AS c60, 125 AS c61, 125 AS c62, 125 AS c63, 125 AS c64, 125 AS c65, 125 AS c66, 125 AS c67, 125 AS c68, 125 AS c69, 125 AS c70, 125 AS c71, 125 AS c72, 125 AS c73, 125 AS c74, 125 AS c75, 125 AS c76, 125 AS c77, 125 AS c78, 125 AS c79, 125 AS c80, 125 AS c81, 125 AS c82, 125 AS c83, 125 AS c84, 125 AS c85, 125 AS c86, 125 AS c87, 125 AS c88, 125 AS c89, 125 AS c90, 125 AS c91, 125 AS c92, 125 AS c93, 125 AS c94, 125 AS c95, 125 AS c96, 125 AS c97, 125 AS c98, 125 AS c99, 125 AS c100, 125 AS c101, 125 AS c102, 125 AS c103, 125 AS c104, 125 AS c105, 125 AS c106, 125 AS c107, 125 AS c108, 125 AS c109, 125 AS c110, 125 AS c111, 125 AS c112, 125 AS c113, 125 AS c114, 125 AS c115, 125 AS c116, 125 AS c117, 125 AS c118, 125 AS c119, 125 AS c120, 125 AS c121, 125 AS c122, 125 AS c123, 125 AS c124, 125 AS c125, 125 AS c126, 125 AS c127, 125 AS c128, 125 AS c129, 125 AS c130, 125 AS c131, 125 AS c132, 125 AS c133, 125 AS c134, 125 AS c135, 125 AS c136, 125 AS c137, 125 AS c138, 125 AS c139, 125 AS c140, 125 AS c141, 125 AS c142, 125 AS c143, 125 AS c144, 125 AS c145, 125 AS c146, 125 AS c147, 125 
AS c148, 125 AS c149, 125 AS c150, 125 AS c151, 125 AS c152, 125 AS c153, 125 AS c154, 125 AS c155, 125 AS c156, 125 AS c157, 125 AS c158, 125 AS c159, 125 AS c160, 125 AS c161, 125 AS c162, 125 AS c163, 125 AS c164, 125 AS c165, 125 AS c166, 125 AS c167, 125 AS c168, 125 AS c169, 125 AS c170, 125 AS c171, 125 AS c172, 125 AS c173, 125 AS c174, 125 AS c175, 125 AS c176, 125 AS c177, 125 AS c178, 125 AS c179, 125 AS c180, 125 AS c181, 125 AS c182, 125 AS c183, 125 AS c184, 125 AS c185, 125 AS c186, 125 AS c187, 125 AS c188, 125 AS c189, 125 AS c190, 125 AS c191, 125 AS c192, 125 AS c193, 125 AS c194, 125 AS c195, 125 AS c196, 125 AS c197, 125 AS c198, 125 AS c199, 125 AS c200, 125 AS c201, 125 AS c202, 125 AS c203, 125 AS c204, 125 AS c205, 125 AS c206, 125 AS c207, 125 AS c208, 125 AS c209, 125 AS c210, 125 AS c211, 125 AS c212, 125 AS c213, 125 AS c214, 125 AS c215, 125 AS c216, 125 AS c217, 125 AS c218, 125 AS c219, 125 AS c220, 125 AS c221, 125 AS c222, 125 AS c223, 125 AS c224, 125 AS c225, 125 AS c226, 125 AS c227, 125 AS c228, 125 AS c229, 125 AS c230, 125 AS c231, 125 AS c232, 125 AS c233, 125 AS c234, 125 AS c235, 125 AS c236, 125 AS c237, 125 AS c238, 125 AS c239, 125 AS c240, 125 AS c241, 125 AS c242, 125 AS c243, 125 AS c244, 125 AS c245, 125 AS c246, 125 AS c247, 125 AS c248, 125 AS c249, 125 AS c250, 125 AS c251, 125 AS c252, 125 AS c253, 125 AS c254, 125 AS c255, 125 AS c256, 125 AS c257, 125 AS c258, 125 AS c259, 125 AS c260, 125 AS c261, 125 AS c262, 125 AS c263, 125 AS c264, 125 AS c265, 125 AS c266, 125 AS c267, 125 AS c268, 125 AS c269, 125 AS c270, 125 AS c271, 125 AS c272, 125 AS c273, 125 AS c274, 125 AS c275, 125 AS c276, 125 AS c277, 125 AS c278, 125 AS c279, 125 AS c280, 125 AS c281, 125 AS c282, 125 AS c283, 125 AS c284, 125 AS c285, 125 AS c286, 125 AS c287, 125 AS c288, 125 AS c289, 125 AS c290, 125 AS c291, 125 AS c292, 125 AS c293, 125 AS c294, 125 AS c295, 125 AS c296, 125 AS c297, 125 AS c298, 125 AS c299, 125 AS c300, 125 AS c301, 125 AS c302, 125 AS c303, 125 AS c304, 125 AS c305, 125 AS c306, 125 AS c307, 125 AS c308, 125 AS c309, 125 AS c310, 125 AS c311, 125 AS c312, 125 AS c313, 125 AS c314, 125 AS c315, 125 AS c316, 125 AS c317, 125 AS c318, 125 AS c319, 125 AS c320, 125 AS c321, 125 AS c322, 125 AS c323, 125 AS c324, 125 AS c325, 125 AS c326, 125 AS c327, 125 AS c328, 125 AS c329, 125 AS c330, 125 AS c331, 125 AS c332, 125 AS c333, 125 AS c334, 125 AS c335, 125 AS c336, 125 AS c337, 125 AS c338, 125 AS c339, 125 AS c340, 125 AS c341, 125 AS c342, 125 AS c343, 125 AS c344, 125 AS c345, 125 AS c346, 125 AS c347, 125 AS c348, 125 AS c349, 125 AS c350, 125 AS c351, 125 AS c352, 125 AS c353, 125 AS c354, 125 AS c355, 125 AS c356, 125 AS c357, 125 AS c358, 125 AS c359, 125 AS c360, 125 AS c361, 125 AS c362, 125 AS c363, 125 AS c364, 125 AS c365, 125 AS c366, 125 AS c367, 125 AS c368, 125 AS c369, 125 AS c370, 125 AS c371, 125 AS c372, 125 AS c373, 125 AS c374, 125 AS c375, 125 AS c376, 125 AS c377, 125 AS c378, 125 AS c379, 125 AS c380, 125 AS c381, 125 AS c382, 125 AS c383, 125 AS c384, 125 AS c385, 125 AS c386, 125 AS c387, 125 AS c388, 125 AS c389, 125 AS c390, 125 AS c391, 125 AS c392, 125 AS c393, 125 AS c394, 125 AS c395, 125 AS c396, 125 AS c397, 125 AS c398, 125 AS c399, 125 AS c400, 125 AS c401, 125 AS c402, 125 AS c403, 125 AS c404, 125 AS c405, 125 AS c406, 125 AS c407, 125 AS c408, 125 AS c409, 125 AS c410, 125 AS c411, 125 AS c412, 125 AS c413, 125 AS c414, 125 AS c415, 125 AS c416, 125 AS c417, 125 AS c418, 125 AS c419, 125 AS c420, 125 AS 
c421, 125 AS c422, 125 AS c423, 125 AS c424, 125 AS c425, 125 AS c426, 125 AS c427, 125 AS c428, 125 AS c429, 125 AS c430, 125 AS c431, 125 AS c432, 125 AS c433, 125 AS c434, 125 AS c435, 125 AS c436, 125 AS c437, 125 AS c438, 125 AS c439, 125 AS c440, 125 AS c441, 125 AS c442, 125 AS c443, 125 AS c444, 125 AS c445, 125 AS c446, 125 AS c447, 125 AS c448, 125 AS c449, 125 AS c450, 125 AS c451, 125 AS c452, 125 AS c453, 125 AS c454, 125 AS c455, 125 AS c456, 125 AS c457, 125 AS c458, 125 AS c459, 125 AS c460, 125 AS c461, 125 AS c462, 125 AS c463, 125 AS c464, 125 AS c465, 125 AS c466, 125 AS c467, 125 AS c468, 125 AS c469, 125 AS c470, 125 AS c471, 125 AS c472, 125 AS c473, 125 AS c474, 125 AS c475, 125 AS c476, 125 AS c477, 125 AS c478, 125 AS c479, 125 AS c480, 125 AS c481, 125 AS c482, 125 AS c483, 125 AS c484, 125 AS c485, 125 AS c486, 125 AS c487, 125 AS c488, 125 AS c489, 125 AS c490, 125 AS c491, 125 AS c492, 125 AS c493, 125 AS c494, 125 AS c495, 125 AS c496, 125 AS c497, 125 AS c498, 125 AS c499, 125 AS c500, 125 AS c501, 125 AS c502, 125 AS c503, 125 AS c504, 125 AS c505, 125 AS c506, 125 AS c507, 125 AS c508, 125 AS c509, 125 AS c510, 125 AS c511, 125 AS c512, 125 AS c513, 125 AS c514, 125 AS c515, 125 AS c516, 125 AS c517, 125 AS c518, 125 AS c519, 125 AS c520, 125 AS c521, 125 AS c522, 125 AS c523, 125 AS c524, 125 AS c525, 125 AS c526, 125 AS c527, 125 AS c528, 125 AS c529, 125 AS c530, 125 AS c531, 125 AS c532, 125 AS c533, 125 AS c534, 125 AS c535, 125 AS c536, 125 AS c537, 125 AS c538, 125 AS c539, 125 AS c540, 125 AS c541, 125 AS c542, 125 AS c543, 125 AS c544, 125 AS c545, 125 AS c546, 125 AS c547, 125 AS c548, 125 AS c549, 125 AS c550 +CREATE TABLE ts_table (LIKE data_table); +SELECT * FROM create_hypertable('ts_table', 'tm'); +NOTICE: adding not-null constraint to column "tm" +DETAIL: Dimensions cannot have NULL values. 
+ hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 33 | public | ts_table | t +(1 row) + +--should report a warning +\set VERBOSITY terse +ALTER TABLE ts_table SET(timescaledb.compress, timescaledb.compress_segmentby = 'c1', + timescaledb.compress_orderby = 'tm'); +WARNING: compressed row size might exceed maximum row size +INSERT INTO ts_table SELECT * FROM data_table; +--cleanup tables +DROP TABLE data_table cascade; +DROP TABLE ts_table cascade; +-- #5458 invalid reads for row expressions after column dropped on compressed tables +CREATE TABLE readings( + "time" TIMESTAMPTZ NOT NULL, + battery_status TEXT, + battery_temperature DOUBLE PRECISION +); +INSERT INTO readings ("time") VALUES ('2022-11-11 11:11:11-00'); +SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour', migrate_data=>true); +NOTICE: migrating data to chunks + create_hypertable +------------------------ + (35,public,readings,t) +(1 row) + +create unique index readings_uniq_idx on readings("time",battery_temperature); +ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature'); +SELECT compress_chunk(show_chunks('readings')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk +(1 row) + +ALTER TABLE readings DROP COLUMN battery_status; +INSERT INTO readings ("time", battery_temperature) VALUES ('2022-11-11 11:11:11', 0.2); +SELECT readings FROM readings; + readings +-------------------------------------- + ("Fri Nov 11 03:11:11 2022 PST",) + ("Fri Nov 11 11:11:11 2022 PST",0.2) +(2 rows) + +-- #5577 On-insert decompression after schema changes may not work properly +SELECT decompress_chunk(show_chunks('readings'),true); +NOTICE: chunk "_hyper_35_24_chunk" is not compressed + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + +(2 rows) + +SELECT compress_chunk(show_chunks('readings'),true); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + _timescaledb_internal._hyper_35_24_chunk +(2 rows) + +\set ON_ERROR_STOP 0 +INSERT INTO readings ("time", battery_temperature) VALUES + ('2022-11-11 11:11:11',0.2) -- same record as inserted +; +ERROR: duplicate key value violates unique constraint "_hyper_35_24_chunk_readings_uniq_idx" +\set ON_ERROR_STOP 1 +SELECT * from readings; + time | battery_temperature +------------------------------+--------------------- + Fri Nov 11 03:11:11 2022 PST | + Fri Nov 11 11:11:11 2022 PST | 0.2 +(2 rows) + +SELECT assert_equal(count(1), 2::bigint) FROM readings; + assert_equal +-------------- + +(1 row) + +-- no unique check failure during decompression +SELECT decompress_chunk(show_chunks('readings'),true); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_22_chunk + _timescaledb_internal._hyper_35_24_chunk +(2 rows) + +-- #5553 Unique constraints are not always respected on compressed tables +CREATE TABLE main_table AS +SELECT '2011-11-11 11:11:11'::timestamptz AS time, 'foo' AS device_id; +CREATE UNIQUE INDEX xm ON main_table(time, device_id); +SELECT create_hypertable('main_table', 'time', chunk_time_interval => interval '12 hour', migrate_data => TRUE); +NOTICE: adding not-null constraint to column "time" +NOTICE: migrating data to chunks + create_hypertable +-------------------------- + (37,public,main_table,t) +(1 row) + +ALTER TABLE main_table SET ( + 
timescaledb.compress, + timescaledb.compress_segmentby = 'device_id', + timescaledb.compress_orderby = ''); +SELECT compress_chunk(show_chunks('main_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_27_chunk +(1 row) + +-- insert rejected +\set ON_ERROR_STOP 0 +INSERT INTO main_table VALUES + ('2011-11-11 11:11:11', 'foo'); +ERROR: duplicate key value violates unique constraint "_hyper_37_27_chunk_xm" +-- insert rejected in case 1st row doesn't violate constraint with different segmentby +INSERT INTO main_table VALUES + ('2011-11-11 11:12:11', 'bar'), + ('2011-11-11 11:11:11', 'foo'); +ERROR: duplicate key value violates unique constraint "_hyper_37_27_chunk_xm" +\set ON_ERROR_STOP 1 +SELECT assert_equal(count(1), 1::bigint) FROM main_table; + assert_equal +-------------- + +(1 row) + +-- no unique check failure during decompression +SELECT decompress_chunk(show_chunks('main_table'), TRUE); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_27_chunk +(1 row) + +DROP TABLE IF EXISTS readings; +CREATE TABLE readings( + "time" timestamptz NOT NULL, + battery_status text, + candy integer, + battery_status2 text, + battery_temperature text +); +SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour'); + create_hypertable +------------------------ + (39,public,readings,t) +(1 row) + +CREATE UNIQUE INDEX readings_uniq_idx ON readings("time", battery_temperature); +ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'battery_temperature'); +ALTER TABLE readings DROP COLUMN battery_status; +ALTER TABLE readings DROP COLUMN battery_status2; +INSERT INTO readings("time", candy, battery_temperature) + VALUES ('2022-11-11 11:11:11', 88, '0.2'); +SELECT compress_chunk(show_chunks('readings'), TRUE); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_39_29_chunk +(1 row) + +-- no error happens +INSERT INTO readings("time", candy, battery_temperature) + VALUES ('2022-11-11 11:11:11', 33, 0.3) +; +-- Segmentby checks should be done for unique indexes without +-- constraints, so create a table without constraints and add a unique +-- index and try to create a table without using the right segmentby +-- column. 
+CREATE TABLE table_unique_index( + location smallint not null, + device_id smallint not null, + time timestamptz not null, + value float8 not null +); +CREATE UNIQUE index ON table_unique_index(location, device_id, time); +SELECT table_name FROM create_hypertable('table_unique_index', 'time'); + table_name +-------------------- + table_unique_index +(1 row) + +-- Will warn because the lack of segmentby/orderby compression options +ALTER TABLE table_unique_index SET (timescaledb.compress); +WARNING: column "location" should be used for segmenting or ordering +WARNING: column "device_id" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); +WARNING: column "device_id" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id'); +WARNING: column "location" should be used for segmenting or ordering +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +-- Will enable compression without warnings +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location,device_id'); +ALTER TABLE table_unique_index SET (timescaledb.compress = off); diff --git a/tsl/test/expected/compression_sorted_merge-16.out b/tsl/test/expected/compression_sorted_merge-16.out new file mode 100644 index 00000000000..bebbacd21d0 --- /dev/null +++ b/tsl/test/expected/compression_sorted_merge-16.out @@ -0,0 +1,1636 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
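The sorted-merge expected file that starts here exercises the DecompressChunk "Sorted merge append" path: when a query's ORDER BY matches the hypertable's compress_orderby, or its exact reverse (including NULLS placement), only the compressed batches are sorted on their _ts_meta_min/_ts_meta_max metadata columns and then merged, instead of bulk-decompressing and re-sorting every row. As a hedged, condensed sketch of the setup pattern the file repeats (not part of the generated output; sorted_merge_demo and its columns are hypothetical):

    -- Hypothetical illustration only; not part of the expected test output.
    CREATE TABLE sorted_merge_demo(time timestamptz NOT NULL, x1 integer, x3 integer);
    SELECT create_hypertable('sorted_merge_demo', 'time');
    ALTER TABLE sorted_merge_demo SET (timescaledb.compress,
        timescaledb.compress_segmentby = 'x1',
        timescaledb.compress_orderby = 'time DESC, x3 ASC');
    INSERT INTO sorted_merge_demo VALUES ('2000-01-01 00:00:00', 1, 1),
                                         ('2000-01-01 01:00:00', 2, 2);
    SELECT compress_chunk(i) FROM show_chunks('sorted_merge_demo') i;
    ANALYZE sorted_merge_demo;
    -- An ORDER BY compatible with compress_orderby (or its reverse) may show
    -- "Sorted merge append: true" with "Bulk Decompression: false" in the plan;
    -- an incompatible ORDER BY falls back to a Sort over the decompressed rows.
    EXPLAIN (costs off) SELECT * FROM sorted_merge_demo ORDER BY time DESC, x3 ASC;
    EXPLAIN (costs off) SELECT * FROM sorted_merge_demo ORDER BY time DESC NULLS LAST;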
+\set PREFIX 'EXPLAIN (analyze, verbose, costs off, timing off, summary off)' +CREATE TABLE test1 ( +time timestamptz NOT NULL, + x1 integer, + x2 integer, + x3 integer, + x4 integer, + x5 integer); +SELECT FROM create_hypertable('test1', 'time'); +-- +(1 row) + +ALTER TABLE test1 SET (timescaledb.compress, timescaledb.compress_segmentby='x1, x2, x5', timescaledb.compress_orderby = 'time DESC, x3 ASC, x4 ASC'); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:00:00-00', 1, 2, 1, 1, 0); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 01:00:00-00', 1, 3, 2, 2, 0); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:00:00-00', 2, 1, 3, 3, 0); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 03:00:00-00', 1, 2, 4, 4, 0); +SELECT compress_chunk(i) FROM show_chunks('test1') i; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +ANALYZE test1; +CREATE TABLE test2 ( +time timestamptz NOT NULL, + x1 integer, + x2 integer, + x3 integer, + x4 integer, + x5 integer); +SELECT FROM create_hypertable('test2', 'time'); +-- +(1 row) + +ALTER TABLE test2 SET (timescaledb.compress, timescaledb.compress_segmentby='x1, x2, x5', timescaledb.compress_orderby = 'time ASC, x3 DESC, x4 DESC'); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:00:00-00', 1, 2, 1, 1, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 01:00:00-00', 1, 3, 2, 2, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:00:00-00', 2, 1, 3, 3, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 03:00:00-00', 1, 2, 4, 4, 0); +SELECT compress_chunk(i) FROM show_chunks('test2') i; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_3_3_chunk +(1 row) + +ANALYZE test2; +CREATE TABLE test_with_defined_null ( + time timestamptz NOT NULL, + x1 integer, + x2 integer, + x3 integer); +SELECT FROM create_hypertable('test_with_defined_null','time'); +-- +(1 row) + +ALTER TABLE test_with_defined_null SET (timescaledb.compress,timescaledb.compress_segmentby='x1', timescaledb.compress_orderby='x2 ASC NULLS FIRST'); +INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01', '1', NULL); +INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01','2', NULL); +INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01','1',1); +INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01','1',2); +SELECT compress_chunk(i) FROM show_chunks('test_with_defined_null') i; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_5_5_chunk +(1 row) + +ANALYZE test_with_defined_null; +-- test1 uses compress_segmentby='x1, x2, x5' and compress_orderby = 'time DESC, x3 ASC, x4 ASC' +-- test2 uses compress_segmentby='x1, x2, x5' and compress_orderby = 'time ASC, x3 DESC, x4 DESC' +-- test_with_defined_null uses compress_segmentby='x1' and compress_orderby = 'x2 ASC NULLS FIRST' +------ +-- Tests based on ordering +------ +-- Should be optimized (implicit NULLS first) +:PREFIX +SELECT * FROM test1 ORDER BY time DESC; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, 
compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should not be optimized (NULL order wrong) +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sort Key: _hyper_1_1_chunk."time" DESC NULLS LAST + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(9 rows) + +-- Should be optimized (implicit NULLS last) +:PREFIX +SELECT * FROM test1 ORDER BY time ASC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", 
compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should not be optimized (NULL order wrong) +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS FIRST; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sort Key: _hyper_1_1_chunk."time" NULLS FIRST + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(9 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC, compress_hyper_2_2_chunk._ts_meta_min_2 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, 
compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 ASC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_min_3 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should not be optimized (wrong order for x4) +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 DESC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, 
_hyper_1_1_chunk.x5 + Sort Key: _hyper_1_1_chunk."time" DESC, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(9 rows) + +-- Should be optimized (backward scan) +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized (backward scan) +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_2 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized (backward scan) +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST, x4 DESC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, 
compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_2 DESC, compress_hyper_2_2_chunk._ts_meta_max_3 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +-- Should not be optimized (wrong order for x4 in backward scan) +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sort Key: _hyper_1_1_chunk."time" NULLS FIRST, _hyper_1_1_chunk.x3 DESC NULLS LAST, _hyper_1_1_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(9 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test2 ORDER BY time ASC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: 
_hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 + Sort Key: compress_hyper_4_4_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test2 ORDER BY time ASC, x3 DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 + Sort Key: compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_2 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, 
compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test2 ORDER BY time ASC, x3 DESC, x4 DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 + Sort Key: compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_2 DESC, compress_hyper_4_4_chunk._ts_meta_max_3 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(10 rows) + +-- Should not be optimized (wrong order for x3) +:PREFIX +SELECT * FROM test2 ORDER BY time ASC, x3 ASC NULLS LAST, x4 DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sort Key: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, 
_hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(9 rows) + +-- Should not be optimized (wrong order for x3) +:PREFIX +SELECT * FROM test2 ORDER BY time ASC, x3 ASC NULLS FIRST, x4 DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sort Key: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x3 NULLS FIRST, _hyper_3_3_chunk.x4 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(9 rows) + +-- Should be optimized (backward scan) +:PREFIX +SELECT * FROM test2 ORDER BY time DESC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + 
Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 + Sort Key: compress_hyper_4_4_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized (backward scan) +:PREFIX +SELECT * FROM test2 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 + Sort Key: compress_hyper_4_4_chunk._ts_meta_max_1 DESC, compress_hyper_4_4_chunk._ts_meta_min_2 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(10 rows) + +-- Should be optimized (backward scan) +:PREFIX +SELECT * FROM 
test2 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 + Sort Key: compress_hyper_4_4_chunk._ts_meta_max_1 DESC, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_min_3 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(10 rows) + +-- Should not be optimized (wrong order for x3 in backward scan) +:PREFIX +SELECT * FROM test2 ORDER BY time DESC NULLS LAST, x3 DESC NULLS FIRST, x4 NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sort Key: _hyper_3_3_chunk."time" DESC NULLS LAST, _hyper_3_3_chunk.x3 DESC, _hyper_3_3_chunk.x4 NULLS FIRST + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Bulk Decompression: true + -> Seq 
Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(9 rows) + +-- Should not be optimized (wrong order for x3 in backward scan) +:PREFIX +SELECT * FROM test2 ORDER BY time DESC NULLS LAST, x3 DESC NULLS LAST, x4 NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Sort Key: _hyper_3_3_chunk."time" DESC NULLS LAST, _hyper_3_3_chunk.x3 DESC NULLS LAST, _hyper_3_3_chunk.x4 NULLS FIRST + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_3_chunk (actual rows=4 loops=1) + Output: _hyper_3_3_chunk."time", _hyper_3_3_chunk.x1, _hyper_3_3_chunk.x2, _hyper_3_3_chunk.x3, _hyper_3_3_chunk.x4, _hyper_3_3_chunk.x5 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_4_4_chunk (actual rows=3 loops=1) + Output: compress_hyper_4_4_chunk."time", compress_hyper_4_4_chunk.x1, compress_hyper_4_4_chunk.x2, compress_hyper_4_4_chunk.x3, compress_hyper_4_4_chunk.x4, compress_hyper_4_4_chunk.x5, compress_hyper_4_4_chunk._ts_meta_count, compress_hyper_4_4_chunk._ts_meta_sequence_num, compress_hyper_4_4_chunk._ts_meta_min_1, compress_hyper_4_4_chunk._ts_meta_max_1, compress_hyper_4_4_chunk._ts_meta_min_2, compress_hyper_4_4_chunk._ts_meta_max_2, compress_hyper_4_4_chunk._ts_meta_min_3, compress_hyper_4_4_chunk._ts_meta_max_3 +(9 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_5_5_chunk (actual rows=4 loops=1) + Output: _hyper_5_5_chunk."time", _hyper_5_5_chunk.x1, _hyper_5_5_chunk.x2, _hyper_5_5_chunk.x3 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=2 loops=1) + Output: compress_hyper_6_6_chunk."time", compress_hyper_6_6_chunk.x1, compress_hyper_6_6_chunk.x2, compress_hyper_6_6_chunk.x3, compress_hyper_6_6_chunk._ts_meta_count, compress_hyper_6_6_chunk._ts_meta_sequence_num, 
compress_hyper_6_6_chunk._ts_meta_min_2, compress_hyper_6_6_chunk._ts_meta_max_2, compress_hyper_6_6_chunk._ts_meta_min_1, compress_hyper_6_6_chunk._ts_meta_max_1 + Sort Key: compress_hyper_6_6_chunk._ts_meta_min_1 NULLS FIRST + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_6_chunk (actual rows=2 loops=1) + Output: compress_hyper_6_6_chunk."time", compress_hyper_6_6_chunk.x1, compress_hyper_6_6_chunk.x2, compress_hyper_6_6_chunk.x3, compress_hyper_6_6_chunk._ts_meta_count, compress_hyper_6_6_chunk._ts_meta_sequence_num, compress_hyper_6_6_chunk._ts_meta_min_2, compress_hyper_6_6_chunk._ts_meta_max_2, compress_hyper_6_6_chunk._ts_meta_min_1, compress_hyper_6_6_chunk._ts_meta_max_1 +(10 rows) + +-- Should be optimized (backward scan) +:PREFIX +SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS LAST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_5_5_chunk (actual rows=4 loops=1) + Output: _hyper_5_5_chunk."time", _hyper_5_5_chunk.x1, _hyper_5_5_chunk.x2, _hyper_5_5_chunk.x3 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=2 loops=1) + Output: compress_hyper_6_6_chunk."time", compress_hyper_6_6_chunk.x1, compress_hyper_6_6_chunk.x2, compress_hyper_6_6_chunk.x3, compress_hyper_6_6_chunk._ts_meta_count, compress_hyper_6_6_chunk._ts_meta_sequence_num, compress_hyper_6_6_chunk._ts_meta_min_2, compress_hyper_6_6_chunk._ts_meta_max_2, compress_hyper_6_6_chunk._ts_meta_min_1, compress_hyper_6_6_chunk._ts_meta_max_1 + Sort Key: compress_hyper_6_6_chunk._ts_meta_max_1 DESC NULLS LAST + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_6_chunk (actual rows=2 loops=1) + Output: compress_hyper_6_6_chunk."time", compress_hyper_6_6_chunk.x1, compress_hyper_6_6_chunk.x2, compress_hyper_6_6_chunk.x3, compress_hyper_6_6_chunk._ts_meta_count, compress_hyper_6_6_chunk._ts_meta_sequence_num, compress_hyper_6_6_chunk._ts_meta_min_2, compress_hyper_6_6_chunk._ts_meta_max_2, compress_hyper_6_6_chunk._ts_meta_min_1, compress_hyper_6_6_chunk._ts_meta_max_1 +(10 rows) + +-- Should not be optimized +:PREFIX +SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS LAST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_5_5_chunk."time", _hyper_5_5_chunk.x1, _hyper_5_5_chunk.x2, _hyper_5_5_chunk.x3 + Sort Key: _hyper_5_5_chunk.x2 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_5_5_chunk (actual rows=4 loops=1) + Output: _hyper_5_5_chunk."time", _hyper_5_5_chunk.x1, _hyper_5_5_chunk.x2, _hyper_5_5_chunk.x3 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_6_chunk (actual rows=2 loops=1) + Output: 
compress_hyper_6_6_chunk."time", compress_hyper_6_6_chunk.x1, compress_hyper_6_6_chunk.x2, compress_hyper_6_6_chunk.x3, compress_hyper_6_6_chunk._ts_meta_count, compress_hyper_6_6_chunk._ts_meta_sequence_num, compress_hyper_6_6_chunk._ts_meta_min_2, compress_hyper_6_6_chunk._ts_meta_max_2, compress_hyper_6_6_chunk._ts_meta_min_1, compress_hyper_6_6_chunk._ts_meta_max_1 +(9 rows) + +-- Should not be optimized +:PREFIX +SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: _hyper_5_5_chunk."time", _hyper_5_5_chunk.x1, _hyper_5_5_chunk.x2, _hyper_5_5_chunk.x3 + Sort Key: _hyper_5_5_chunk.x2 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_5_5_chunk (actual rows=4 loops=1) + Output: _hyper_5_5_chunk."time", _hyper_5_5_chunk.x1, _hyper_5_5_chunk.x2, _hyper_5_5_chunk.x3 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_6_chunk (actual rows=2 loops=1) + Output: compress_hyper_6_6_chunk."time", compress_hyper_6_6_chunk.x1, compress_hyper_6_6_chunk.x2, compress_hyper_6_6_chunk.x3, compress_hyper_6_6_chunk._ts_meta_count, compress_hyper_6_6_chunk._ts_meta_sequence_num, compress_hyper_6_6_chunk._ts_meta_min_2, compress_hyper_6_6_chunk._ts_meta_max_2, compress_hyper_6_6_chunk._ts_meta_min_1, compress_hyper_6_6_chunk._ts_meta_max_1 +(9 rows) + +------ +-- Tests based on attributes +------ +-- Should be optimized (some batches qualify by pushed down filter on _ts_meta_max_3) +:PREFIX +SELECT * FROM test1 WHERE x4 > 0 ORDER BY time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Filter: (_hyper_1_1_chunk.x4 > 0) + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on 
_timescaledb_internal.compress_hyper_2_2_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Filter: (compress_hyper_2_2_chunk._ts_meta_max_3 > 0) +(12 rows) + +-- Should be optimized (no batches qualify by pushed down filter on _ts_meta_max_3) +:PREFIX +SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Filter: (_hyper_1_1_chunk.x4 > 100) + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Filter: (compress_hyper_2_2_chunk._ts_meta_max_3 > 100) + Rows Removed by Filter: 3 +(13 rows) + +-- Should be optimized +:PREFIX +SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x3, x4; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Filter: (_hyper_1_1_chunk.x4 > 100) + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_min_3 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Filter: (compress_hyper_2_2_chunk._ts_meta_max_3 > 100) + Rows Removed by Filter: 3 +(13 rows) + +-- Should be optimized (duplicate order by attributes) +:PREFIX +SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x3, x3; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Filter: (_hyper_1_1_chunk.x4 > 100) + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, 
compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC, compress_hyper_2_2_chunk._ts_meta_min_2 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Filter: (compress_hyper_2_2_chunk._ts_meta_max_3 > 100) + Rows Removed by Filter: 3 +(13 rows) + +-- Should be optimized (duplicate order by attributes) +:PREFIX +SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x3, x4, x3, x4; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Filter: (_hyper_1_1_chunk.x4 > 100) + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_min_3 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Filter: 
(compress_hyper_2_2_chunk._ts_meta_max_3 > 100) + Rows Removed by Filter: 3 +(13 rows) + +-- Should not be optimized +:PREFIX +SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x4, x3; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sort Key: _hyper_1_1_chunk."time" DESC, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Vectorized Filter: (_hyper_1_1_chunk.x4 > 100) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Filter: (compress_hyper_2_2_chunk._ts_meta_max_3 > 100) + Rows Removed by Filter: 3 +(12 rows) + +-- Should not be optimized +:PREFIX +SELECT * FROM test1 WHERE x4 > 100 ORDER BY time ASC, x3, x4; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5 + Vectorized Filter: (_hyper_1_1_chunk.x4 > 100) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (actual rows=0 loops=1) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, 
compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Filter: (compress_hyper_2_2_chunk._ts_meta_max_3 > 100) + Rows Removed by Filter: 3 +(12 rows) + +------ +-- Tests based on results +------ +-- Forward scan +SELECT * FROM test1 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 +(4 rows) + +-- Backward scan +SELECT * FROM test1 ORDER BY time ASC NULLS FIRST; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 +(4 rows) + +-- Forward scan +SELECT * FROM test2 ORDER BY time ASC; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 +(4 rows) + +-- Backward scan +SELECT * FROM test2 ORDER BY time DESC NULLS LAST; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 +(4 rows) + +-- With selection on compressed column (value larger as max value for all batches, so no batch has to be opened) +SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------+----+----+----+----+---- +(0 rows) + +-- With selection on compressed column (value smaller as max value for some batches, so batches are opened and filter has to be applied) +SELECT * FROM test1 WHERE x4 > 2 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 +(2 rows) + +-- With selection on segment_by column +SELECT * FROM test1 WHERE time < '1980-01-01 00:00:00-00' ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------+----+----+----+----+---- +(0 rows) + +SELECT * FROM test1 WHERE time > '1980-01-01 00:00:00-00' ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 +(4 rows) + +-- With selection on segment_by and compressed column +SELECT * FROM test1 WHERE time > '1980-01-01 00:00:00-00' ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 17:00:00 1999 
PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 +(4 rows) + +SELECT * FROM test1 WHERE time > '1980-01-01 00:00:00-00' AND x4 > 100 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------+----+----+----+----+---- +(0 rows) + +-- Without projection +SELECT * FROM test1 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 +(4 rows) + +-- With projection on time +SELECT time FROM test1 ORDER BY time DESC; + time +------------------------------ + Fri Dec 31 19:00:00 1999 PST + Fri Dec 31 18:00:00 1999 PST + Fri Dec 31 17:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST +(4 rows) + +-- With projection on x3 +SELECT x3 FROM test1 ORDER BY time DESC; + x3 +---- + 4 + 3 + 2 + 1 +(4 rows) + +-- With projection on x3 and time +SELECT x3,time FROM test1 ORDER BY time DESC; + x3 | time +----+------------------------------ + 4 | Fri Dec 31 19:00:00 1999 PST + 3 | Fri Dec 31 18:00:00 1999 PST + 2 | Fri Dec 31 17:00:00 1999 PST + 1 | Fri Dec 31 16:00:00 1999 PST +(4 rows) + +-- With projection on time and x3 +SELECT time,x3 FROM test1 ORDER BY time DESC; + time | x3 +------------------------------+---- + Fri Dec 31 19:00:00 1999 PST | 4 + Fri Dec 31 18:00:00 1999 PST | 3 + Fri Dec 31 17:00:00 1999 PST | 2 + Fri Dec 31 16:00:00 1999 PST | 1 +(4 rows) + +-- Test with projection and constants +EXPLAIN (verbose) SELECT 1 as one, 2 as two, 3 as three, time, x2 FROM test1 ORDER BY time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (cost=1.06..38.26 rows=3000 width=24) + Output: 1, 2, 3, _hyper_1_1_chunk."time", _hyper_1_1_chunk.x2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (cost=1.06..8.26 rows=3000 width=12) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x2 + Sorted merge append: true + -> Sort (cost=0.00..0.00 rows=0 width=0) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (cost=0.00..1.03 rows=3 width=56) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, 
compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +SELECT 1 as one, 2 as two, 3 as three, time, x2 FROM test1 ORDER BY time DESC; + one | two | three | time | x2 +-----+-----+-------+------------------------------+---- + 1 | 2 | 3 | Fri Dec 31 19:00:00 1999 PST | 2 + 1 | 2 | 3 | Fri Dec 31 18:00:00 1999 PST | 1 + 1 | 2 | 3 | Fri Dec 31 17:00:00 1999 PST | 3 + 1 | 2 | 3 | Fri Dec 31 16:00:00 1999 PST | 2 +(4 rows) + +-- Test with projection and constants +EXPLAIN (verbose) SELECT 1 as one, 2 as two, 3 as three, x2, time FROM test1 ORDER BY time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (cost=1.06..38.26 rows=3000 width=24) + Output: 1, 2, 3, _hyper_1_1_chunk.x2, _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (cost=1.06..8.26 rows=3000 width=12) + Output: _hyper_1_1_chunk.x2, _hyper_1_1_chunk."time" + Sorted merge append: true + -> Sort (cost=0.00..0.00 rows=0 width=0) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 + Sort Key: compress_hyper_2_2_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_2_2_chunk (cost=0.00..1.03 rows=3 width=56) + Output: compress_hyper_2_2_chunk."time", compress_hyper_2_2_chunk.x1, compress_hyper_2_2_chunk.x2, compress_hyper_2_2_chunk.x3, compress_hyper_2_2_chunk.x4, compress_hyper_2_2_chunk.x5, compress_hyper_2_2_chunk._ts_meta_count, compress_hyper_2_2_chunk._ts_meta_sequence_num, compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1, compress_hyper_2_2_chunk._ts_meta_min_2, compress_hyper_2_2_chunk._ts_meta_max_2, compress_hyper_2_2_chunk._ts_meta_min_3, compress_hyper_2_2_chunk._ts_meta_max_3 +(10 rows) + +SELECT 1 as one, 2 as two, 3 as three, x2, time FROM test1 ORDER BY time DESC; + one | two | three | x2 | time +-----+-----+-------+----+------------------------------ + 1 | 2 | 3 | 2 | Fri Dec 31 19:00:00 1999 PST + 1 | 2 | 3 | 1 | Fri Dec 31 18:00:00 1999 PST + 1 | 2 | 3 | 3 | Fri Dec 31 17:00:00 1999 PST + 1 | 2 | 3 | 2 | Fri Dec 31 16:00:00 1999 PST +(4 rows) + +-- With projection and selection on compressed column (value smaller as max value for some batches, so batches are opened and filter has to be applied) +SELECT x4 FROM test1 WHERE x4 > 2 ORDER BY time DESC; + x4 +---- + 4 + 3 +(2 rows) + +-- Aggregation with count +SELECT count(*) FROM test1; + count 
+------- + 4 +(1 row) + +-- Test with default values +ALTER TABLE test1 ADD COLUMN c1 int; +ALTER TABLE test1 ADD COLUMN c2 int NOT NULL DEFAULT 42; +SELECT * FROM test1 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 | c1 | c2 +------------------------------+----+----+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 | | 42 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 | | 42 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 | | 42 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 | | 42 +(4 rows) + +-- Recompress +SELECT decompress_chunk(i) FROM show_chunks('test1') i; + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk(i) FROM show_chunks('test1') i; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +ANALYZE test1; +-- Test with a changed physical layout +-- build_physical_tlist() can not be used for the scan on the compressed chunk anymore +SELECT * FROM test1 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 | c1 | c2 +------------------------------+----+----+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 | | 42 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 | | 42 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 | | 42 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 | | 42 +(4 rows) + +ALTER TABLE test1 DROP COLUMN c2; +SELECT * FROM test1 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 | c1 +------------------------------+----+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 | + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 | + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 | + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 | +(4 rows) + +-- Test with a re-created column +ALTER TABLE test1 ADD COLUMN c2 int NOT NULL DEFAULT 43; +SELECT * FROM test1 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 | c1 | c2 +------------------------------+----+----+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 | | 43 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 | | 43 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 | | 43 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 | | 43 +(4 rows) + +-- Test with the recreated column +:PREFIX +SELECT * FROM test1 ORDER BY time DESC; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5, _hyper_1_1_chunk.c1, _hyper_1_1_chunk.c2 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x3, 
compress_hyper_2_7_chunk._ts_meta_min_2, compress_hyper_2_7_chunk._ts_meta_max_2, compress_hyper_2_7_chunk.x4, compress_hyper_2_7_chunk._ts_meta_min_3, compress_hyper_2_7_chunk._ts_meta_max_3, compress_hyper_2_7_chunk.x5, compress_hyper_2_7_chunk.c1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count + Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_7_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x3, compress_hyper_2_7_chunk._ts_meta_min_2, compress_hyper_2_7_chunk._ts_meta_max_2, compress_hyper_2_7_chunk.x4, compress_hyper_2_7_chunk._ts_meta_min_3, compress_hyper_2_7_chunk._ts_meta_max_3, compress_hyper_2_7_chunk.x5, compress_hyper_2_7_chunk.c1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count +(10 rows) + +SELECT * FROM test1 ORDER BY time DESC; + time | x1 | x2 | x3 | x4 | x5 | c1 | c2 +------------------------------+----+----+----+----+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 | | 43 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 | | 43 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 | | 43 + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 | | 43 +(4 rows) + +-- Test with projection and recreated column +:PREFIX +SELECT time, x2, x1, c2 FROM test1 ORDER BY time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x1, _hyper_1_1_chunk.c2 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count + Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_7_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count +(10 rows) + +SELECT time, x2, x1, c2 FROM test1 ORDER BY time DESC; + time | x2 | x1 | c2 +------------------------------+----+----+---- + Fri Dec 31 19:00:00 1999 PST | 2 | 1 | 43 + Fri Dec 31 18:00:00 1999 PST | 1 | 2 | 43 + Fri Dec 31 17:00:00 1999 PST | 3 | 1 | 43 + Fri Dec 31 16:00:00 1999 PST | 2 | 1 | 43 +(4 rows) + +-- Test with projection and recreated column +:PREFIX +SELECT x2, x1, c2, time FROM test1 ORDER BY time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x1, _hyper_1_1_chunk.c2, _hyper_1_1_chunk."time" + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk._ts_meta_count + Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_7_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk._ts_meta_count +(10 rows) + +SELECT x2, x1, c2, time FROM test1 ORDER BY time DESC; + x2 | x1 | c2 | time +----+----+----+------------------------------ + 2 | 1 | 43 | Fri Dec 31 19:00:00 1999 PST + 1 | 2 | 43 | Fri Dec 31 18:00:00 1999 PST + 3 | 1 | 43 | Fri Dec 31 17:00:00 1999 PST + 2 | 1 | 43 | Fri Dec 31 16:00:00 1999 PST +(4 rows) + +-- Test with projection, constants and recreated column +:PREFIX +SELECT 1 as one, 2 as two, 3 as three, x2, x1, c2, time FROM test1 ORDER BY time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=4 loops=1) + Output: 1, 2, 3, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x1, _hyper_1_1_chunk.c2, _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x1, _hyper_1_1_chunk.c2, _hyper_1_1_chunk."time" + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk._ts_meta_count + Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_7_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk._ts_meta_count +(12 rows) + +SELECT 1 as one, 2 as two, 3 as three, x2, x1, c2, time FROM test1 ORDER BY time DESC; + one | two | three | x2 | x1 | c2 | time +-----+-----+-------+----+----+----+------------------------------ + 1 | 2 | 3 | 2 | 1 | 43 | Fri Dec 31 19:00:00 1999 PST + 1 | 2 | 3 | 1 | 2 | 43 | Fri Dec 31 18:00:00 1999 PST + 1 | 2 | 3 | 3 | 1 | 43 | Fri Dec 31 17:00:00 1999 PST + 1 | 2 | 3 | 2 | 1 | 43 | Fri Dec 31 16:00:00 1999 PST +(4 rows) + +-- Test with null values +SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS FIRST; + time | x1 | x2 | x3 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | 2 | | + Sat Jan 01 00:00:00 2000 PST | 1 | | + Sat Jan 01 00:00:00 2000 PST | 1 | 1 | + Sat Jan 01 00:00:00 2000 PST | 1 | 
2 | +(4 rows) + +SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS LAST; + time | x1 | x2 | x3 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | 1 | 2 | + Sat Jan 01 00:00:00 2000 PST | 1 | 1 | + Sat Jan 01 00:00:00 2000 PST | 1 | | + Sat Jan 01 00:00:00 2000 PST | 2 | | +(4 rows) + +SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS LAST; + time | x1 | x2 | x3 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | 1 | 1 | + Sat Jan 01 00:00:00 2000 PST | 1 | 2 | + Sat Jan 01 00:00:00 2000 PST | 1 | | + Sat Jan 01 00:00:00 2000 PST | 2 | | +(4 rows) + +SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS FIRST; + time | x1 | x2 | x3 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | 1 | | + Sat Jan 01 00:00:00 2000 PST | 2 | | + Sat Jan 01 00:00:00 2000 PST | 1 | 2 | + Sat Jan 01 00:00:00 2000 PST | 1 | 1 | +(4 rows) + +------ +-- Tests based on compressed chunk state +------ +-- Should be optimized +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5, _hyper_1_1_chunk.c1, _hyper_1_1_chunk.c2 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x3, compress_hyper_2_7_chunk._ts_meta_min_2, compress_hyper_2_7_chunk._ts_meta_max_2, compress_hyper_2_7_chunk.x4, compress_hyper_2_7_chunk._ts_meta_min_3, compress_hyper_2_7_chunk._ts_meta_max_3, compress_hyper_2_7_chunk.x5, compress_hyper_2_7_chunk.c1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count + Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_7_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x3, compress_hyper_2_7_chunk._ts_meta_min_2, compress_hyper_2_7_chunk._ts_meta_max_2, compress_hyper_2_7_chunk.x4, compress_hyper_2_7_chunk._ts_meta_min_3, compress_hyper_2_7_chunk._ts_meta_max_3, compress_hyper_2_7_chunk.x5, compress_hyper_2_7_chunk.c1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count +(10 rows) + +BEGIN TRANSACTION; +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:01:00-00', 10, 20, 30, 40, 50); +-- Should be optimized using a merge append path between the compressed and uncompressed part of the chunk +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST; + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on public.test1 (actual rows=5 loops=1) + Output: test1."time", test1.x1, test1.x2, test1.x3, test1.x4, test1.x5, test1.c1, test1.c2 + Order: test1."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=4 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5, _hyper_1_1_chunk.c1, _hyper_1_1_chunk.c2 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x3, compress_hyper_2_7_chunk._ts_meta_min_2, compress_hyper_2_7_chunk._ts_meta_max_2, compress_hyper_2_7_chunk.x4, compress_hyper_2_7_chunk._ts_meta_min_3, compress_hyper_2_7_chunk._ts_meta_max_3, compress_hyper_2_7_chunk.x5, compress_hyper_2_7_chunk.c1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count + Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_2_7_chunk (actual rows=3 loops=1) + Output: compress_hyper_2_7_chunk."time", compress_hyper_2_7_chunk._ts_meta_min_1, compress_hyper_2_7_chunk._ts_meta_max_1, compress_hyper_2_7_chunk.x1, compress_hyper_2_7_chunk.x2, compress_hyper_2_7_chunk.x3, compress_hyper_2_7_chunk._ts_meta_min_2, compress_hyper_2_7_chunk._ts_meta_max_2, compress_hyper_2_7_chunk.x4, compress_hyper_2_7_chunk._ts_meta_min_3, compress_hyper_2_7_chunk._ts_meta_max_3, compress_hyper_2_7_chunk.x5, compress_hyper_2_7_chunk.c1, compress_hyper_2_7_chunk.c2, compress_hyper_2_7_chunk._ts_meta_count + -> Sort (actual rows=1 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5, _hyper_1_1_chunk.c1, _hyper_1_1_chunk.c2 + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk (actual rows=1 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.x1, _hyper_1_1_chunk.x2, _hyper_1_1_chunk.x3, _hyper_1_1_chunk.x4, _hyper_1_1_chunk.x5, _hyper_1_1_chunk.c1, _hyper_1_1_chunk.c2 +(23 rows) + +-- The inserted value should be visible +SELECT * FROM test1 ORDER BY time ASC NULLS LAST; + time | x1 | x2 | x3 | x4 | x5 | c1 | c2 +------------------------------+----+----+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 | | 43 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 | | 43 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 | | 43 + Fri Dec 31 18:01:00 1999 PST | 10 | 20 | 30 | 40 | 50 | | 43 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 | | 43 +(5 rows) + +ROLLBACK; +------ 
+-- Tests on a larger relation +------ +CREATE TABLE sensor_data ( +time timestamptz NOT NULL, +sensor_id integer NOT NULL, +cpu double precision NULL, +temperature double precision NULL); +SELECT FROM create_hypertable('sensor_data','time'); +-- +(1 row) + +INSERT INTO sensor_data +SELECT +time + (INTERVAL '1 minute' * random()) AS time, +sensor_id, +random() AS cpu, +random() * 100 AS temperature +FROM +generate_series('1980-01-01 00:00:00-00', '1980-02-01 00:00:00-00', INTERVAL '10 minute') AS g1(time), +generate_series(1, 100, 1 ) AS g2(sensor_id) +ORDER BY +time; +ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_segmentby='sensor_id', timescaledb.compress_orderby = 'time DESC'); +SELECT add_compression_policy('sensor_data','1 minute'::INTERVAL); + add_compression_policy +------------------------ + 1000 +(1 row) + +SELECT compress_chunk(i) FROM show_chunks('sensor_data') i; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_7_8_chunk + _timescaledb_internal._hyper_7_9_chunk + _timescaledb_internal._hyper_7_10_chunk + _timescaledb_internal._hyper_7_11_chunk + _timescaledb_internal._hyper_7_12_chunk + _timescaledb_internal._hyper_7_13_chunk +(6 rows) + +-- Ensure the optimization is used for queries on this table +:PREFIX +SELECT * FROM sensor_data ORDER BY time DESC LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + Output: sensor_data."time", sensor_data.sensor_id, sensor_data.cpu, sensor_data.temperature + -> Custom Scan (ChunkAppend) on public.sensor_data (actual rows=1 loops=1) + Output: sensor_data."time", sensor_data.sensor_id, sensor_data.cpu, sensor_data.temperature + Order: sensor_data."time" DESC + Startup Exclusion: false + Runtime Exclusion: false + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_7_13_chunk (actual rows=1 loops=1) + Output: _hyper_7_13_chunk."time", _hyper_7_13_chunk.sensor_id, _hyper_7_13_chunk.cpu, _hyper_7_13_chunk.temperature + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=2 loops=1) + Output: compress_hyper_8_19_chunk."time", compress_hyper_8_19_chunk.sensor_id, compress_hyper_8_19_chunk.cpu, compress_hyper_8_19_chunk.temperature, compress_hyper_8_19_chunk._ts_meta_count, compress_hyper_8_19_chunk._ts_meta_sequence_num, compress_hyper_8_19_chunk._ts_meta_min_1, compress_hyper_8_19_chunk._ts_meta_max_1 + Sort Key: compress_hyper_8_19_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_8_19_chunk (actual rows=100 loops=1) + Output: compress_hyper_8_19_chunk."time", compress_hyper_8_19_chunk.sensor_id, compress_hyper_8_19_chunk.cpu, compress_hyper_8_19_chunk.temperature, compress_hyper_8_19_chunk._ts_meta_count, compress_hyper_8_19_chunk._ts_meta_sequence_num, compress_hyper_8_19_chunk._ts_meta_min_1, compress_hyper_8_19_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_7_12_chunk (never executed) + Output: _hyper_7_12_chunk."time", _hyper_7_12_chunk.sensor_id, _hyper_7_12_chunk.cpu, _hyper_7_12_chunk.temperature + Sorted merge append: true + Bulk Decompression: false + -> Sort (never executed) + Output: 
compress_hyper_8_18_chunk."time", compress_hyper_8_18_chunk.sensor_id, compress_hyper_8_18_chunk.cpu, compress_hyper_8_18_chunk.temperature, compress_hyper_8_18_chunk._ts_meta_count, compress_hyper_8_18_chunk._ts_meta_sequence_num, compress_hyper_8_18_chunk._ts_meta_min_1, compress_hyper_8_18_chunk._ts_meta_max_1 + Sort Key: compress_hyper_8_18_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_8_18_chunk (never executed) + Output: compress_hyper_8_18_chunk."time", compress_hyper_8_18_chunk.sensor_id, compress_hyper_8_18_chunk.cpu, compress_hyper_8_18_chunk.temperature, compress_hyper_8_18_chunk._ts_meta_count, compress_hyper_8_18_chunk._ts_meta_sequence_num, compress_hyper_8_18_chunk._ts_meta_min_1, compress_hyper_8_18_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_7_11_chunk (never executed) + Output: _hyper_7_11_chunk."time", _hyper_7_11_chunk.sensor_id, _hyper_7_11_chunk.cpu, _hyper_7_11_chunk.temperature + Sorted merge append: true + Bulk Decompression: false + -> Sort (never executed) + Output: compress_hyper_8_17_chunk."time", compress_hyper_8_17_chunk.sensor_id, compress_hyper_8_17_chunk.cpu, compress_hyper_8_17_chunk.temperature, compress_hyper_8_17_chunk._ts_meta_count, compress_hyper_8_17_chunk._ts_meta_sequence_num, compress_hyper_8_17_chunk._ts_meta_min_1, compress_hyper_8_17_chunk._ts_meta_max_1 + Sort Key: compress_hyper_8_17_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_8_17_chunk (never executed) + Output: compress_hyper_8_17_chunk."time", compress_hyper_8_17_chunk.sensor_id, compress_hyper_8_17_chunk.cpu, compress_hyper_8_17_chunk.temperature, compress_hyper_8_17_chunk._ts_meta_count, compress_hyper_8_17_chunk._ts_meta_sequence_num, compress_hyper_8_17_chunk._ts_meta_min_1, compress_hyper_8_17_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_7_10_chunk (never executed) + Output: _hyper_7_10_chunk."time", _hyper_7_10_chunk.sensor_id, _hyper_7_10_chunk.cpu, _hyper_7_10_chunk.temperature + Sorted merge append: true + Bulk Decompression: false + -> Sort (never executed) + Output: compress_hyper_8_16_chunk."time", compress_hyper_8_16_chunk.sensor_id, compress_hyper_8_16_chunk.cpu, compress_hyper_8_16_chunk.temperature, compress_hyper_8_16_chunk._ts_meta_count, compress_hyper_8_16_chunk._ts_meta_sequence_num, compress_hyper_8_16_chunk._ts_meta_min_1, compress_hyper_8_16_chunk._ts_meta_max_1 + Sort Key: compress_hyper_8_16_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_8_16_chunk (never executed) + Output: compress_hyper_8_16_chunk."time", compress_hyper_8_16_chunk.sensor_id, compress_hyper_8_16_chunk.cpu, compress_hyper_8_16_chunk.temperature, compress_hyper_8_16_chunk._ts_meta_count, compress_hyper_8_16_chunk._ts_meta_sequence_num, compress_hyper_8_16_chunk._ts_meta_min_1, compress_hyper_8_16_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_7_9_chunk (never executed) + Output: _hyper_7_9_chunk."time", _hyper_7_9_chunk.sensor_id, _hyper_7_9_chunk.cpu, _hyper_7_9_chunk.temperature + Sorted merge append: true + Bulk Decompression: false + -> Sort (never executed) + Output: compress_hyper_8_15_chunk."time", compress_hyper_8_15_chunk.sensor_id, compress_hyper_8_15_chunk.cpu, compress_hyper_8_15_chunk.temperature, compress_hyper_8_15_chunk._ts_meta_count, compress_hyper_8_15_chunk._ts_meta_sequence_num, compress_hyper_8_15_chunk._ts_meta_min_1, 
compress_hyper_8_15_chunk._ts_meta_max_1 + Sort Key: compress_hyper_8_15_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_8_15_chunk (never executed) + Output: compress_hyper_8_15_chunk."time", compress_hyper_8_15_chunk.sensor_id, compress_hyper_8_15_chunk.cpu, compress_hyper_8_15_chunk.temperature, compress_hyper_8_15_chunk._ts_meta_count, compress_hyper_8_15_chunk._ts_meta_sequence_num, compress_hyper_8_15_chunk._ts_meta_min_1, compress_hyper_8_15_chunk._ts_meta_max_1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_7_8_chunk (never executed) + Output: _hyper_7_8_chunk."time", _hyper_7_8_chunk.sensor_id, _hyper_7_8_chunk.cpu, _hyper_7_8_chunk.temperature + Sorted merge append: true + Bulk Decompression: false + -> Sort (never executed) + Output: compress_hyper_8_14_chunk."time", compress_hyper_8_14_chunk.sensor_id, compress_hyper_8_14_chunk.cpu, compress_hyper_8_14_chunk.temperature, compress_hyper_8_14_chunk._ts_meta_count, compress_hyper_8_14_chunk._ts_meta_sequence_num, compress_hyper_8_14_chunk._ts_meta_min_1, compress_hyper_8_14_chunk._ts_meta_max_1 + Sort Key: compress_hyper_8_14_chunk._ts_meta_max_1 DESC + -> Seq Scan on _timescaledb_internal.compress_hyper_8_14_chunk (never executed) + Output: compress_hyper_8_14_chunk."time", compress_hyper_8_14_chunk.sensor_id, compress_hyper_8_14_chunk.cpu, compress_hyper_8_14_chunk.temperature, compress_hyper_8_14_chunk._ts_meta_count, compress_hyper_8_14_chunk._ts_meta_sequence_num, compress_hyper_8_14_chunk._ts_meta_min_1, compress_hyper_8_14_chunk._ts_meta_max_1 +(62 rows) + +-- Verify that we produce the same order without and with the optimization +CREATE PROCEDURE order_test(query text) LANGUAGE plpgsql AS $$ + DECLARE + count integer; + BEGIN + + SET timescaledb.enable_decompression_sorted_merge = 0; + EXECUTE format('CREATE TABLE temp_data1 AS %s;', query); + ALTER TABLE temp_data1 ADD COLUMN new_id SERIAL PRIMARY KEY; + + SET timescaledb.enable_decompression_sorted_merge = 1; + EXECUTE format('CREATE TABLE temp_data2 AS %s;', query); + ALTER TABLE temp_data2 ADD COLUMN new_id SERIAL PRIMARY KEY; + + CREATE TEMP TABLE temp_data3 AS ( + SELECT * FROM temp_data1 UNION ALL SELECT * FROM temp_data2 + ); + + count := (SELECT COUNT(*) FROM (SELECT COUNT(*) FROM temp_data3 GROUP BY time, new_id HAVING COUNT(*) != 2) AS s); + + IF count > 0 THEN + RAISE EXCEPTION 'Detected different order with and without the optimization %', count; + END IF; + + -- Drop old tables + DROP TABLE temp_data1; + DROP TABLE temp_data2; + DROP TABLE temp_data3; + + END; +$$; +CALL order_test('SELECT * FROM sensor_data ORDER BY time DESC'); +CALL order_test('SELECT * FROM sensor_data ORDER BY time DESC LIMIT 100'); +CALL order_test('SELECT * FROM sensor_data ORDER BY time ASC NULLS FIRST'); +CALL order_test('SELECT * FROM sensor_data ORDER BY time ASC NULLS FIRST LIMIT 100'); +CALL order_test('SELECT * FROM test1 ORDER BY time DESC'); +CALL order_test('SELECT * FROM test1 ORDER BY time ASC NULLS LAST'); +------ +-- Test window functions +------ +CREATE TABLE insert_test(id INT); +INSERT INTO insert_test SELECT time_bucket_gapfill(1,time,1,5) FROM (VALUES (1),(2)) v(time) GROUP BY 1 ORDER BY 1; +SELECT * FROM insert_test AS ref_0 +WHERE EXISTS ( + SELECT + sum(ref_0.id) OVER (partition by ref_0.id ORDER BY ref_0.id,ref_0.id,sample_0.time) + FROM + sensor_data AS sample_0 + WHERE (1 > sample_0.temperature) +); + id +---- + 1 + 2 + 3 + 4 +(4 rows) + +------ +-- Test enabling and disabling the optimization based on costs 
+------ +CREATE TABLE test_costs ( +time timestamptz NOT NULL, +segment_by integer NOT NULL, +x1 integer NOT NULL); +SELECT FROM create_hypertable('test_costs', 'time'); +-- +(1 row) + +ALTER TABLE test_costs SET (timescaledb.compress, timescaledb.compress_segmentby='segment_by', timescaledb.compress_orderby = 'time DESC, x1'); +-- Create 100 segments +INSERT INTO test_costs +SELECT +'2000-01-01 02:01:00-00'::timestamptz AS time, +segment_by, +random() as x1 +FROM +generate_series(1, 100, 1) AS g2(segment_by) +ORDER BY time; +SELECT add_compression_policy('test_costs','1 minute'::INTERVAL); + add_compression_policy +------------------------ + 1001 +(1 row) + +SELECT compress_chunk(i) FROM show_chunks('test_costs') i; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_9_20_chunk +(1 row) + +ANALYZE test_costs; +-- Number of segments +SELECT count(*) FROM (SELECT segment_by from test_costs group by segment_by) AS s; + count +------- + 100 +(1 row) + +-- Test query plan (should be optimized due to 100 different segments) +:PREFIX +SELECT time, segment_by, x1 FROM test_costs ORDER BY time DESC; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_9_20_chunk (actual rows=100 loops=1) + Output: _hyper_9_20_chunk."time", _hyper_9_20_chunk.segment_by, _hyper_9_20_chunk.x1 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=100 loops=1) + Output: compress_hyper_10_21_chunk."time", compress_hyper_10_21_chunk.segment_by, compress_hyper_10_21_chunk.x1, compress_hyper_10_21_chunk._ts_meta_count, compress_hyper_10_21_chunk._ts_meta_sequence_num, compress_hyper_10_21_chunk._ts_meta_min_1, compress_hyper_10_21_chunk._ts_meta_max_1, compress_hyper_10_21_chunk._ts_meta_min_2, compress_hyper_10_21_chunk._ts_meta_max_2 + Sort Key: compress_hyper_10_21_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_10_21_chunk (actual rows=100 loops=1) + Output: compress_hyper_10_21_chunk."time", compress_hyper_10_21_chunk.segment_by, compress_hyper_10_21_chunk.x1, compress_hyper_10_21_chunk._ts_meta_count, compress_hyper_10_21_chunk._ts_meta_sequence_num, compress_hyper_10_21_chunk._ts_meta_min_1, compress_hyper_10_21_chunk._ts_meta_max_1, compress_hyper_10_21_chunk._ts_meta_min_2, compress_hyper_10_21_chunk._ts_meta_max_2 +(10 rows) + +-- Decompress chunk +SELECT decompress_chunk(i) FROM show_chunks('test_costs') i; + decompress_chunk +----------------------------------------- + _timescaledb_internal._hyper_9_20_chunk +(1 row) + +-- Add 900 segments (1000 segments total) +INSERT INTO test_costs +SELECT +'2000-01-01 02:01:00-00'::timestamptz AS time, +segment_by, +random() as x1 +FROM +generate_series(100, 1000, 1) AS g2(segment_by) +ORDER BY time; +-- Recompress chunk +SELECT compress_chunk(i) FROM show_chunks('test_costs') i; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_9_20_chunk +(1 row) + +ANALYZE test_costs; +-- Number of segments +SELECT count(*) FROM (SELECT segment_by from test_costs group by segment_by) AS s; + count +------- + 1000 +(1 
row) + +-- Test query plan (should not be optimized due to 1000 different segments) +:PREFIX +SELECT time, segment_by, x1 FROM test_costs ORDER BY time DESC; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=1001 loops=1) + Output: _hyper_9_20_chunk."time", _hyper_9_20_chunk.segment_by, _hyper_9_20_chunk.x1 + Sort Key: _hyper_9_20_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_9_20_chunk (actual rows=1001 loops=1) + Output: _hyper_9_20_chunk."time", _hyper_9_20_chunk.segment_by, _hyper_9_20_chunk.x1 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_10_22_chunk (actual rows=1000 loops=1) + Output: compress_hyper_10_22_chunk."time", compress_hyper_10_22_chunk.segment_by, compress_hyper_10_22_chunk.x1, compress_hyper_10_22_chunk._ts_meta_count, compress_hyper_10_22_chunk._ts_meta_sequence_num, compress_hyper_10_22_chunk._ts_meta_min_1, compress_hyper_10_22_chunk._ts_meta_max_1, compress_hyper_10_22_chunk._ts_meta_min_2, compress_hyper_10_22_chunk._ts_meta_max_2 +(9 rows) + +-- Test query plan with predicate (query should be optimized due to ~100 segments) +:PREFIX +SELECT time, segment_by, x1 FROM test_costs WHERE segment_by > 900 and segment_by < 999 ORDER BY time DESC; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_9_20_chunk (actual rows=98 loops=1) + Output: _hyper_9_20_chunk."time", _hyper_9_20_chunk.segment_by, _hyper_9_20_chunk.x1 + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=98 loops=1) + Output: compress_hyper_10_22_chunk."time", compress_hyper_10_22_chunk.segment_by, compress_hyper_10_22_chunk.x1, compress_hyper_10_22_chunk._ts_meta_count, compress_hyper_10_22_chunk._ts_meta_sequence_num, compress_hyper_10_22_chunk._ts_meta_min_1, compress_hyper_10_22_chunk._ts_meta_max_1, compress_hyper_10_22_chunk._ts_meta_min_2, compress_hyper_10_22_chunk._ts_meta_max_2 + Sort Key: compress_hyper_10_22_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Index Scan using compress_hyper_10_22_chunk__compressed_hypertable_10_segment_by on _timescaledb_internal.compress_hyper_10_22_chunk (actual rows=98 loops=1) + Output: compress_hyper_10_22_chunk."time", compress_hyper_10_22_chunk.segment_by, compress_hyper_10_22_chunk.x1, compress_hyper_10_22_chunk._ts_meta_count, compress_hyper_10_22_chunk._ts_meta_sequence_num, compress_hyper_10_22_chunk._ts_meta_min_1, compress_hyper_10_22_chunk._ts_meta_max_1, compress_hyper_10_22_chunk._ts_meta_min_2, compress_hyper_10_22_chunk._ts_meta_max_2 + Index Cond: ((compress_hyper_10_22_chunk.segment_by > 900) AND (compress_hyper_10_22_chunk.segment_by < 999)) +(11 rows) + +-- Target list creation - Issue 5738 +CREATE TABLE bugtab( + time timestamp 
without time zone, + hin character varying(128) NOT NULL, + model character varying(128) NOT NULL, + block character varying(128) NOT NULL, + message_name character varying(128) NOT NULL, + signal_name character varying(128) NOT NULL, + signal_numeric_value double precision, + signal_string_value character varying(128) +); +SELECT create_hypertable('bugtab', 'time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +WARNING: column type "character varying" used for "hin" does not follow best practices +WARNING: column type "character varying" used for "model" does not follow best practices +WARNING: column type "character varying" used for "block" does not follow best practices +WARNING: column type "character varying" used for "message_name" does not follow best practices +WARNING: column type "character varying" used for "signal_name" does not follow best practices +WARNING: column type "character varying" used for "signal_string_value" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------- + (11,public,bugtab,t) +(1 row) + +INSERT INTO bugtab values('2020-01-01 10:00', 'hin1111', 'model111', 'blok111', 'message_here', 'signal1', 12.34, '12.34'); +ALTER TABLE bugtab SET (timescaledb.compress, timescaledb.compress_segmentby = 'hin, signal_name', timescaledb.compress_orderby = 'time'); +SELECT chunk_schema || '.' || chunk_name AS "chunk_table_bugtab" + FROM timescaledb_information.chunks + WHERE hypertable_name = 'bugtab' ORDER BY range_start LIMIT 1 \gset +SELECT compress_chunk(i) FROM show_chunks('bugtab') i; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_11_23_chunk +(1 row) + +:PREFIX +SELECT "time","hin"::text,"model"::text,"block"::text,"message_name"::text,"signal_name"::text,"signal_numeric_value","signal_string_value"::text FROM :chunk_table_bugtab ORDER BY "time" DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + Output: _hyper_11_23_chunk."time", (_hyper_11_23_chunk.hin)::text, (_hyper_11_23_chunk.model)::text, (_hyper_11_23_chunk.block)::text, (_hyper_11_23_chunk.message_name)::text, (_hyper_11_23_chunk.signal_name)::text, _hyper_11_23_chunk.signal_numeric_value, (_hyper_11_23_chunk.signal_string_value)::text + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_11_23_chunk (actual rows=1 loops=1) + Output: _hyper_11_23_chunk."time", _hyper_11_23_chunk.hin, _hyper_11_23_chunk.model, _hyper_11_23_chunk.block, _hyper_11_23_chunk.message_name, _hyper_11_23_chunk.signal_name, _hyper_11_23_chunk.signal_numeric_value, _hyper_11_23_chunk.signal_string_value + Sorted merge append: true + Bulk Decompression: false + -> Sort (actual rows=1 loops=1) + Output: compress_hyper_12_24_chunk."time", compress_hyper_12_24_chunk.hin, compress_hyper_12_24_chunk.model, compress_hyper_12_24_chunk.block, compress_hyper_12_24_chunk.message_name, compress_hyper_12_24_chunk.signal_name, 
compress_hyper_12_24_chunk.signal_numeric_value, compress_hyper_12_24_chunk.signal_string_value, compress_hyper_12_24_chunk._ts_meta_count, compress_hyper_12_24_chunk._ts_meta_sequence_num, compress_hyper_12_24_chunk._ts_meta_min_1, compress_hyper_12_24_chunk._ts_meta_max_1 + Sort Key: compress_hyper_12_24_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_12_24_chunk (actual rows=1 loops=1) + Output: compress_hyper_12_24_chunk."time", compress_hyper_12_24_chunk.hin, compress_hyper_12_24_chunk.model, compress_hyper_12_24_chunk.block, compress_hyper_12_24_chunk.message_name, compress_hyper_12_24_chunk.signal_name, compress_hyper_12_24_chunk.signal_numeric_value, compress_hyper_12_24_chunk.signal_string_value, compress_hyper_12_24_chunk._ts_meta_count, compress_hyper_12_24_chunk._ts_meta_sequence_num, compress_hyper_12_24_chunk._ts_meta_min_1, compress_hyper_12_24_chunk._ts_meta_max_1 +(12 rows) + +SELECT "time","hin"::text,"model"::text,"block"::text,"message_name"::text,"signal_name"::text,"signal_numeric_value","signal_string_value"::text FROM :chunk_table_bugtab ORDER BY "time" DESC; + time | hin | model | block | message_name | signal_name | signal_numeric_value | signal_string_value +--------------------------+---------+----------+---------+--------------+-------------+----------------------+--------------------- + Wed Jan 01 10:00:00 2020 | hin1111 | model111 | blok111 | message_here | signal1 | 12.34 | 12.34 +(1 row) + +SELECT "time","hin"::text,"model"::text,"block"::text,"message_name"::text,"signal_name"::text,"signal_numeric_value","signal_string_value"::text FROM bugtab ORDER BY "time" DESC; + time | hin | model | block | message_name | signal_name | signal_numeric_value | signal_string_value +--------------------------+---------+----------+---------+--------------+-------------+----------------------+--------------------- + Wed Jan 01 10:00:00 2020 | hin1111 | model111 | blok111 | message_here | signal1 | 12.34 | 12.34 +(1 row) + +-- Condition that filter the first tuple of a batch - Issue 5797 +CREATE TABLE test ( + id bigint, + dttm timestamp, + otherId int, + valueFk int, + otherFk int, + measure double precision +); +SELECT create_hypertable('test', 'dttm'); +WARNING: column type "timestamp without time zone" used for "dttm" does not follow best practices +NOTICE: adding not-null constraint to column "dttm" + create_hypertable +-------------------- + (13,public,test,t) +(1 row) + +ALTER TABLE test SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'otherId,valueFk,otherFk' +); +INSERT INTO public.test (id, dttm, otherid, valuefk, otherfk, measure) +VALUES (109288, '2023-05-25 23:12:13.000000', 130, 14499, 13, 0.13216569884001217), + (109286, '2023-05-25 23:12:13.000000', 130, 14500, 13, 0.3740651978942786), + (107617, '2023-05-25 14:24:12.000000', 130, 14850, 13, 0.5978259144311195), + (103864, '2023-05-25 13:30:51.000000', 130, 16760, 13, 0.4733429856616205), + (104977, '2023-05-25 12:11:47.000000', 133, 14843, 13, 0.24366893909655118), + (108321, '2023-05-25 18:39:07.000000', 133, 15294, 13, 0.8768629101819378), + (108320, '2023-05-25 16:09:17.000000', 133, 15294, 13, 0.6185638532799445), + (104987, '2023-05-25 13:27:19.000000', 133, 15294, 13, 0.9830846939109854), + (104737, '2023-05-25 19:59:54.000000', 135, 14238, 13, 0.2388520055224177), + (106278, '2023-05-25 19:59:54.000000', 135, 14238, 13, 0.6305156586688518), + (104741, '2023-05-25 19:59:54.000000', 135, 14238, 13, 0.4990673076480263), + (106277, 
'2023-05-25 12:53:34.000000', 135, 14238, 13, 0.46086278330000496), + (97409, '2023-05-25 12:38:48.000000', 137, 14533, 13, 0.8308173375978924), + (105234, '2023-05-25 12:38:45.000000', 137, 14533, 13, 0.10860962941223917), + (105233, '2023-05-25 12:06:35.000000', 137, 14533, 13, 0.09058791972962155), + (97434, '2023-05-25 12:39:46.000000', 137, 14657, 13, 0.023315916140422388), + (108167, '2023-05-25 15:41:30.000000', 137, 14964, 13, 0.21757999385617666), + (107741, '2023-05-25 14:40:37.000000', 137, 14964, 13, 0.3449447147508202), + (106312, '2023-05-25 14:40:16.000000', 137, 14964, 13, 0.11890456868959376), + (106134, '2023-05-25 12:56:18.000000', 137, 14964, 13, 0.8004332371337775), + (103696, '2023-05-25 12:54:31.000000', 137, 14964, 13, 0.30147495793613643), + (106311, '2023-05-25 12:44:22.000000', 137, 14964, 13, 0.7412968055185551), + (106133, '2023-05-25 12:44:22.000000', 137, 14964, 13, 0.12940337622720932), + (105711, '2023-05-25 12:43:57.000000', 137, 14964, 13, 0.1044849979830822), + (105710, '2023-05-25 12:34:04.000000', 137, 14964, 13, 0.9113563410974876), + (108787, '2023-05-25 17:59:35.000000', 137, 15377, 13, 0.921829256160489), + (107833, '2023-05-25 14:53:08.000000', 137, 16302, 13, 0.9663117845438407), + (105435, '2023-05-25 12:30:59.000000', 137, 16568, 13, 0.13774896612028797), + (105434, '2023-05-25 12:29:29.000000', 137, 16568, 13, 0.3891495411502035), + (108357, '2023-05-25 16:18:39.000000', 137, 16665, 13, 0.44701901843246716), + (98564, '2023-05-25 17:12:43.000000', 138, 14760, 13, 0.8463114782142114), + (109032, '2023-05-25 19:00:00.000000', 138, 14992, 13, 0.025578609447126865), + (108800, '2023-05-25 18:43:18.000000', 138, 14992, 13, 0.5397724043221928), + (108799, '2023-05-25 18:00:00.000000', 138, 14992, 13, 0.0321658507434357), + (107320, '2023-05-25 14:00:01.000000', 138, 14992, 13, 0.9042941365487067), + (107296, '2023-05-25 14:00:00.000000', 138, 14992, 13, 0.7821178685669885), + (104700, '2023-05-25 12:36:55.000000', 138, 14992, 13, 0.6854496458178119), + (105177, '2023-05-25 12:00:01.000000', 138, 14992, 13, 0.23780719110724746), + (109330, '2023-05-25 23:59:13.000000', 138, 15080, 13, 0.5409015970284159), + (107400, '2023-05-25 16:45:13.000000', 138, 15080, 13, 0.6233594483468217), + (107399, '2023-05-25 14:03:49.000000', 138, 15080, 13, 0.8192327792045404), + (105004, '2023-05-25 13:37:49.000000', 138, 15080, 13, 0.2993620446103442), + (102592, '2023-05-25 13:31:48.000000', 138, 15080, 13, 0.24649704579496046), + (109028, '2023-05-25 19:00:00.000000', 138, 15123, 13, 0.5442767942906279), + (108794, '2023-05-25 18:43:18.000000', 138, 15123, 13, 0.29095714680616425), + (108793, '2023-05-25 18:00:00.000000', 138, 15123, 13, 0.681894893772391), + (107314, '2023-05-25 14:00:01.000000', 138, 15123, 13, 0.9637603904838059), + (107292, '2023-05-25 14:00:00.000000', 138, 15123, 13, 0.05956707862994293), + (104696, '2023-05-25 12:36:55.000000', 138, 15123, 13, 0.27039489547807705), + (105171, '2023-05-25 12:00:01.000000', 138, 15123, 13, 0.1269705046788907), + (106625, '2023-05-25 13:22:19.000000', 138, 15326, 13, 0.7712280764026431), + (106624, '2023-05-25 13:15:49.000000', 138, 15326, 13, 0.585381418741779), + (105699, '2023-05-25 12:44:33.000000', 138, 15326, 13, 0.3710994669938259), + (105698, '2023-05-25 12:33:41.000000', 138, 15326, 13, 0.8992328857980105), + (108514, '2023-05-25 16:47:37.000000', 138, 15620, 13, 0.40346934167556725), + (102691, '2023-05-25 13:33:57.000000', 138, 15620, 13, 0.8046719989908304), + (103655, '2023-05-25 13:34:39.000000', 
138, 15740, 13, 0.2541099322817928), + (106987, '2023-05-25 13:37:36.000000', 138, 15766, 13, 0.8407818724583045), + (102180, '2023-05-25 13:37:11.000000', 138, 15766, 13, 0.19149633299917213), + (102717, '2023-05-25 13:38:17.000000', 138, 15868, 13, 0.03196157886032225), + (102719, '2023-05-25 13:38:42.000000', 138, 15921, 13, 0.9986438564169191), + (103659, '2023-05-25 13:35:11.000000', 138, 15926, 13, 0.8549591705597201), + (108796, '2023-05-25 18:43:18.000000', 138, 15932, 13, 0.6213586835883191), + (108795, '2023-05-25 18:00:00.000000', 138, 15932, 13, 0.6730718577847092), + (107326, '2023-05-25 14:00:01.000000', 138, 15932, 13, 0.278131899094646), + (107298, '2023-05-25 14:00:00.000000', 138, 15932, 13, 0.92423751071723), + (104702, '2023-05-25 12:36:55.000000', 138, 15932, 13, 0.22221315122722984), + (105175, '2023-05-25 12:00:01.000000', 138, 15932, 13, 0.28839114292751233), + (102736, '2023-05-25 16:15:29.000000', 138, 16052, 13, 0.431037595792759), + (99163, '2023-05-25 13:47:30.000000', 138, 16419, 13, 0.5291021511946319), + (102738, '2023-05-25 13:45:05.000000', 138, 16420, 13, 0.6506497895856924), + (99109, '2023-05-25 13:37:49.000000', 138, 16420, 13, 0.019501542758906254), + (108798, '2023-05-25 18:43:18.000000', 138, 16590, 13, 0.8990882904615916), + (108797, '2023-05-25 18:00:00.000000', 138, 16590, 13, 0.8888186371755147), + (107328, '2023-05-25 14:00:01.000000', 138, 16590, 13, 0.019486942610562608), + (107300, '2023-05-25 14:00:00.000000', 138, 16590, 13, 0.5614292991802508), + (104698, '2023-05-25 12:36:55.000000', 138, 16590, 13, 0.01866956387405594), + (105173, '2023-05-25 12:00:01.000000', 138, 16590, 13, 0.25661478763909074), + (107224, '2023-05-25 13:51:57.000000', 138, 16633, 13, 0.0010321723593804677), + (99064, '2023-05-25 13:37:49.000000', 138, 16633, 13, 0.8675616866165861), + (109225, '2023-05-25 22:13:01.000000', 138, 16669, 13, 0.1076822852142385), + (109224, '2023-05-25 21:11:56.000000', 138, 16669, 13, 0.24001365186054713); +SELECT compress_chunk(show_chunks('test', older_than => INTERVAL '1 week'), true); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_13_25_chunk +(1 row) + +SELECT t.dttm FROM test t WHERE t.dttm > '2023-05-25T14:23:12' ORDER BY t.dttm; + dttm +-------------------------- + Thu May 25 14:24:12 2023 + Thu May 25 14:40:16 2023 + Thu May 25 14:40:37 2023 + Thu May 25 14:53:08 2023 + Thu May 25 15:41:30 2023 + Thu May 25 16:09:17 2023 + Thu May 25 16:15:29 2023 + Thu May 25 16:18:39 2023 + Thu May 25 16:45:13 2023 + Thu May 25 16:47:37 2023 + Thu May 25 17:12:43 2023 + Thu May 25 17:59:35 2023 + Thu May 25 18:00:00 2023 + Thu May 25 18:00:00 2023 + Thu May 25 18:00:00 2023 + Thu May 25 18:00:00 2023 + Thu May 25 18:39:07 2023 + Thu May 25 18:43:18 2023 + Thu May 25 18:43:18 2023 + Thu May 25 18:43:18 2023 + Thu May 25 18:43:18 2023 + Thu May 25 19:00:00 2023 + Thu May 25 19:00:00 2023 + Thu May 25 19:59:54 2023 + Thu May 25 19:59:54 2023 + Thu May 25 19:59:54 2023 + Thu May 25 21:11:56 2023 + Thu May 25 22:13:01 2023 + Thu May 25 23:12:13 2023 + Thu May 25 23:12:13 2023 + Thu May 25 23:59:13 2023 +(31 rows) + diff --git a/tsl/test/expected/continuous_aggs-16.out b/tsl/test/expected/continuous_aggs-16.out new file mode 100644 index 00000000000..dfe105bb8a0 --- /dev/null +++ b/tsl/test/expected/continuous_aggs-16.out @@ -0,0 +1,2349 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- initialize the bgw mock state to prevent the materialization workers from running +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION test.continuous_aggs_find_view(cagg REGCLASS) RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_test_continuous_agg_find_by_view_name' LANGUAGE C; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +-- remove any default jobs, e.g., telemetry so bgw_job isn't polluted +DELETE FROM _timescaledb_config.bgw_job; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +--TEST1 --- +--basic test with count +create table foo (a integer, b integer, c integer); +select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" + table_name +------------ + foo +(1 row) + +insert into foo values( 3 , 16 , 20); +insert into foo values( 1 , 10 , 20); +insert into foo values( 1 , 11 , 20); +insert into foo values( 1 , 12 , 20); +insert into foo values( 1 , 13 , 20); +insert into foo values( 1 , 14 , 20); +insert into foo values( 2 , 14 , 20); +insert into foo values( 2 , 15 , 20); +insert into foo values( 2 , 16 , 20); +CREATE OR REPLACE FUNCTION integer_now_foo() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0) FROM foo $$; +SELECT set_integer_now_func('foo', 'integer_now_foo'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_m1(a, countb) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select a, count(b) +from foo +group by time_bucket(1, a), a WITH NO DATA; +SELECT add_continuous_aggregate_policy('mat_m1', NULL, 2::integer, '12 h'::interval) AS job_id +\gset +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 2, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + 
partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select a, count(b), +time_bucket(1, a) +from foo +group by time_bucket(1, a) , a ; +select * from mat_m1 order by a ; + a | countb +---+-------- + 1 | 5 + 2 | 3 + 3 | 1 +(3 rows) + +--check triggers on user hypertable -- +SET ROLE :ROLE_SUPERUSER; +select tgname, tgtype, tgenabled , relname from pg_trigger, pg_class +where tgrelid = pg_class.oid and pg_class.relname like 'foo' +order by tgname; + tgname | tgtype | tgenabled | relname +------------------------------+--------+-----------+--------- + ts_cagg_invalidation_trigger | 29 | O | foo + ts_insert_blocker | 7 | O | foo +(2 rows) + +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- TEST2 --- +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +SHOW enable_partitionwise_aggregate; + enable_partitionwise_aggregate +-------------------------------- + off +(1 row) + +SET enable_partitionwise_aggregate = on; +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions values ( '2010-01-01 09:00:00-08', 'SFO', 55, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'por', 100, 100); +insert into conditions values ( '2010-01-02 09:00:00-08', 'SFO', 65, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-11-01 09:00:00-08', 'NYC', 45, 35); +insert into conditions values ( '2018-11-02 09:00:00-08', 'NYC', 35, 15); +CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec) WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +-- Change `finalized` option is forbiden +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.finalized=false); +ERROR: cannot alter finalized option for continuous aggregates +ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.finalized=true); +ERROR: cannot alter finalized option for continuous aggregates +\set ON_ERROR_STOP 1 +-- Materialized hypertable for mat_m1 should not be visible in the +-- hypertables view: +SELECT 
hypertable_schema, hypertable_name +FROM timescaledb_information.hypertables ORDER BY 1,2; + hypertable_schema | hypertable_name +-------------------+----------------- + public | conditions + public | foo +(2 rows) + +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) +from conditions +group by time_bucket('1day', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumt, sumh +from mat_m1 +order by timec; + timec | minl | sumt | sumh +------------------------------+------+------+------ + Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 + Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 + Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 + Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 +(4 rows) + +select time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) +from conditions +group by time_bucket('1day', timec) +order by 1; + time_bucket | min | sum | sum +------------------------------+-----+-----+----- + Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 + Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 + Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 + Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 +(4 rows) + +SET enable_partitionwise_aggregate = off; +-- TEST3 -- +-- drop on table conditions should cascade to materialized mat_v1 +drop table conditions cascade; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions values ( '2010-01-01 09:00:00-08', 'SFO', 55, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'por', 100, 100); +insert into conditions values ( '2010-01-02 09:00:00-08', 'NYC', 65, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'SFO', 65, 45); +insert into conditions values ( '2010-01-03 09:00:00-08', 'NYC', 45, 55); +insert into conditions values ( '2010-01-05 09:00:00-08', 'SFO', 75, 100); +insert into conditions values ( '2018-11-01 09:00:00-08', 'NYC', 45, 35); +insert into conditions values ( '2018-11-02 09:00:00-08', 'NYC', 35, 15); +insert into conditions values ( '2018-11-03 09:00:00-08', 'NYC', 35, 25); +CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumth, stddevh +from mat_m1 +order by timec; + timec | minl | sumth | 
stddevh +------------------------------+------+-------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 + Sun Jan 03 16:00:00 2010 PST | SFO | 175 | + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(3 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+ sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +order by time_bucket('1week', timec); + time_bucket | min | ?column? | stddev +------------------------------+-----+----------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 + Sun Jan 03 16:00:00 2010 PST | SFO | 175 | + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(3 rows) + +-- TEST4 -- +--materialized view with group by clause + expression in SELECT +-- use previous data from conditions +--drop only the view. +-- apply where clause on result of mat_m1 -- +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +where location = 'NYC' +group by time_bucket('1week', timec) + WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +where location = 'NYC' +group by time_bucket('1week', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumth, stddevh +from mat_m1 +where stddevh is not null +order by timec; + timec | minl | sumth | stddevh +------------------------------+------+-------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 210 | 7.07106781186548 + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(2 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+ sum(humidity), stddev(humidity) +from conditions +where location = 'NYC' +group by time_bucket('1week', timec) +order by time_bucket('1week', timec); + time_bucket | min | ?column? 
| stddev +------------------------------+-----+----------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 210 | 7.07106781186548 + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(2 rows) + +-- TEST5 -- +---------test with having clause ---------------------- +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +create materialized view mat_m1( timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having stddev(humidity) is not null WITH NO DATA; +; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having stddev(humidity) is not null; +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- should have same results -- +select * from mat_m1 +order by sumth; + timec | minl | sumth | stddevh +------------------------------+------+-------+------------------ + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 +(2 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having stddev(humidity) is not null +order by sum(temperature)+sum(humidity); + time_bucket | min | ?column? 
| stddev +------------------------------+-----+----------+------------------ + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 +(2 rows) + +-- TEST6 -- +--group by with more than 1 group column +-- having clause with a mix of columns from select list + others +drop table conditions cascade; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp numeric NULL, + highp numeric null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, 71, 28; +--naming with AS clauses +CREATE MATERIALIZED VIEW mat_naming +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1week', timec) as bucket, location as loc, sum(temperature)+sum(humidity) as sumth, stddev(humidity) +from conditions +group by bucket, loc +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_naming' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+--------- + 1 | bucket + 2 | loc + 3 | sumth + 4 | stddev +(4 rows) + +DROP MATERIALIZED VIEW mat_naming; +--naming with default names +CREATE MATERIALIZED VIEW mat_naming +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1week', timec), location, sum(temperature)+sum(humidity) as sumth, stddev(humidity) +from conditions +group by 1,2 +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_naming' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+------------- + 1 | time_bucket + 2 | location + 3 | sumth + 4 | stddev +(4 rows) + +DROP MATERIALIZED VIEW mat_naming; +--naming with view col names +CREATE MATERIALIZED VIEW mat_naming(bucket, loc, sum_t_h, stdd) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1week', timec), location, sum(temperature)+sum(humidity), stddev(humidity) +from conditions 
+group by 1,2 +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_naming' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+--------- + 1 | bucket + 2 | loc + 3 | sum_t_h + 4 | stdd +(4 rows) + +DROP MATERIALIZED VIEW mat_naming; +CREATE MATERIALIZED VIEW mat_m1(timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+--------- + 1 | timec + 2 | minl + 3 | sumth + 4 | stddevh +(4 rows) + +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having min(location) >= 'NYC' and avg(temperature) > 20; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumth, stddevh +from mat_m1 +order by timec, minl; + timec | minl | sumth | stddevh +------------------------------+------+-------+------------------ + Sun Dec 16 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 23 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 30 16:00:00 2018 PST | NYC | 210 | 21.2132034355964 +(3 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having min(location) >= 'NYC' and avg(temperature) > 20 and avg(lowp) > 10 +order by time_bucket('1week', timec), min(location); + time_bucket | min | ?column? 
| stddev +------------------------------+-----+----------+------------------ + Sun Dec 16 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 23 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 30 16:00:00 2018 PST | NYC | 210 | 21.2132034355964 +(3 rows) + +--check view defintion in information views +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like 'mat_m1'; + view_name | view_definition +-----------+------------------------------------------------------------------------------------------- + mat_m1 | SELECT time_bucket('@ 7 days'::interval, timec) AS timec, + + | min(location) AS minl, + + | (sum(temperature) + sum(humidity)) AS sumth, + + | stddev(humidity) AS stddevh + + | FROM conditions + + | GROUP BY (time_bucket('@ 7 days'::interval, timec)) + + | HAVING ((min(location) >= 'NYC'::text) AND (avg(temperature) > (20)::double precision)); +(1 row) + +--TEST6 -- select from internal view +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select * from :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME"; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--lets drop the view and check +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +drop table conditions; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70, NULL; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40, NULL; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; +SELECT + $$ + select time_bucket('1week', timec) , + min(location) as col1, sum(temperature)+sum(humidity) as col2, stddev(humidity) as col3, min(allnull) as col4 + from conditions + group by time_bucket('1week', timec) + having min(location) >= 'NYC' and avg(temperature) > 20 + $$ AS "QUERY" +\gset +\set ECHO errors +psql:include/cont_agg_equal.sql:8: NOTICE: materialized view "mat_test" does not exist, skipping + ?column? | count +---------------------------------------------------------------+------- + Number of rows different between view and original (expect 0) | 0 +(1 row) + +SELECT + $$ + select time_bucket('1week', timec), location, + sum(temperature)+sum(humidity) as col2, stddev(humidity) as col3, min(allnull) as col4 + from conditions + group by location, time_bucket('1week', timec) + $$ AS "QUERY" +\gset +\set ECHO errors +psql:include/cont_agg_equal.sql:8: NOTICE: drop cascades to table _timescaledb_internal._hyper_15_34_chunk + ?column? 
| count +---------------------------------------------------------------+------- + Number of rows different between view and original (expect 0) | 0 +(1 row) + +--TEST7 -- drop tests for view and hypertable +--DROP tests +\set ON_ERROR_STOP 0 +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_test' +\gset +DROP TABLE :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME"; +ERROR: cannot drop table _timescaledb_internal._materialized_hypertable_16 because other objects depend on it +DROP VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME"; +ERROR: cannot drop the partial/direct view because it is required by a continuous aggregate +DROP VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME"; +ERROR: cannot drop the partial/direct view because it is required by a continuous aggregate +\set ON_ERROR_STOP 1 +--catalog entry still there; +SELECT count(*) +FROM _timescaledb_catalog.continuous_agg ca +WHERE user_view_name = 'mat_test'; + count +------- + 1 +(1 row) + +--mat table, user_view, direct view and partial view all there +select count(*) from pg_class where relname = :'PART_VIEW_NAME'; + count +------- + 1 +(1 row) + +select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; + count +------- + 1 +(1 row) + +select count(*) from pg_class where relname = :'DIR_VIEW_NAME'; + count +------- + 1 +(1 row) + +select count(*) from pg_class where relname = 'mat_test'; + count +------- + 1 +(1 row) + +DROP MATERIALIZED VIEW mat_test; +NOTICE: drop cascades to 2 other objects +--catalog entry should be gone +SELECT count(*) +FROM _timescaledb_catalog.continuous_agg ca +WHERE user_view_name = 'mat_test'; + count +------- + 0 +(1 row) + +--mat table, user_view, direct view and partial view all gone +select count(*) from pg_class where relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = :'DIR_VIEW_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = 'mat_test'; + count +------- + 0 +(1 row) + +--test dropping raw table +DROP TABLE conditions; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +--no data in hyper table on purpose so that CASCADE is not required because of chunks +CREATE MATERIALIZED VIEW mat_drop_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec) WITH NO DATA; +\set ON_ERROR_STOP 0 +DROP TABLE conditions; +ERROR: cannot drop table conditions because other objects depend on it +\set ON_ERROR_STOP 1 +--insert data now +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70, NULL; +insert into conditions +select 
generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40, NULL; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_drop_test' +\gset +SET client_min_messages TO NOTICE; +CALL refresh_continuous_aggregate('mat_drop_test', NULL, NULL); +--force invalidation +insert into conditions +select generate_series('2017-11-01 00:00'::timestamp, '2017-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; +select count(*) from _timescaledb_catalog.continuous_aggs_invalidation_threshold; + count +------- + 1 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log; + count +------- + 1 +(1 row) + +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +--catalog entry should be gone +SELECT count(*) +FROM _timescaledb_catalog.continuous_agg ca +WHERE user_view_name = 'mat_drop_test'; + count +------- + 0 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_invalidation_threshold; + count +------- + 0 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log; + count +------- + 0 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_materialization_invalidation_log; + count +------- + 0 +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +--mat table, user_view, and partial view all gone +select count(*) from pg_class where relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = 'mat_drop_test'; + count +------- + 0 +(1 row) + +--TEST With options +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec), location, humidity, temperature WITH NO DATA; +SELECT add_continuous_aggregate_policy('mat_with_test', NULL, '5 h'::interval, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1001 +(1 
row) + +SELECT alter_job(id, schedule_interval => '1h') FROM _timescaledb_config.bgw_job; + alter_job +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1001,"@ 1 hour","@ 0",-1,"@ 12 hours",t,"{""end_offset"": ""@ 5 hours"", ""start_offset"": null, ""mat_hypertable_id"": 20}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job; + schedule_interval +------------------- + @ 1 hour +(1 row) + +SELECT alter_job(id, schedule_interval => '2h') FROM _timescaledb_config.bgw_job; + alter_job +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1001,"@ 2 hours","@ 0",-1,"@ 12 hours",t,"{""end_offset"": ""@ 5 hours"", ""start_offset"": null, ""mat_hypertable_id"": 20}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job; + schedule_interval +------------------- + @ 2 hours +(1 row) + +select indexname, indexdef from pg_indexes where tablename = +(SELECT h.table_name +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_with_test') +order by indexname; + indexname | indexdef +---------------------------------------+---------------------------------------------------------------------------------------------------------------------------------- + _materialized_hypertable_20_timec_idx | CREATE INDEX _materialized_hypertable_20_timec_idx ON _timescaledb_internal._materialized_hypertable_20 USING btree (timec DESC) +(1 row) + +DROP MATERIALIZED VIEW mat_with_test; +--no additional indexes +CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, + timescaledb.materialized_only=true, + timescaledb.create_group_indexes=false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec), location, humidity, temperature WITH NO DATA; +select indexname, indexdef from pg_indexes where tablename = +(SELECT h.table_name +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_with_test'); + indexname | indexdef +---------------------------------------+---------------------------------------------------------------------------------------------------------------------------------- + _materialized_hypertable_21_timec_idx | CREATE INDEX _materialized_hypertable_21_timec_idx ON _timescaledb_internal._materialized_hypertable_21 USING btree (timec DESC) +(1 row) + +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 2 other objects +--test WITH using a hypertable with an integer time dimension +CREATE TABLE conditions ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE 
FUNCTION integer_now_conditions() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_conditions'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +SELECT add_continuous_aggregate_policy('mat_with_test', NULL, 500::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT alter_job(id, schedule_interval => '2h') FROM _timescaledb_config.bgw_job; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + (1002,"@ 2 hours","@ 0",-1,"@ 12 hours",t,"{""end_offset"": 500, ""start_offset"": null, ""mat_hypertable_id"": 23}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job; + schedule_interval +------------------- + @ 2 hours +(1 row) + +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 2 other objects +--test space partitions +CREATE TABLE space_table ( + time BIGINT, + dev BIGINT, + data BIGINT +); +SELECT create_hypertable( + 'space_table', + 'time', + chunk_time_interval => 10, + partitioning_column => 'dev', + number_partitions => 3); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------- + (24,public,space_table,t) +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_space_table() returns BIGINT LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), BIGINT '0') FROM space_table $$; +SELECT set_integer_now_func('space_table', 'integer_now_space_table'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW space_view +WITH (timescaledb.continuous, + timescaledb.materialized_only=true) +AS SELECT time_bucket('4', time), COUNT(data) + FROM space_table + GROUP BY 1 WITH NO DATA; +INSERT INTO space_table VALUES + (0, 1, 1), (0, 2, 1), (1, 1, 1), (1, 2, 1), + (10, 1, 1), (10, 2, 1), (11, 1, 1), (11, 2, 1); +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'space_view' +\gset +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket; + time_bucket | count +-------------+------- +(0 rows) + +CALL refresh_continuous_aggregate('space_view', NULL, NULL); +SELECT * FROM space_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 4 + 8 | 4 +(2 rows) + +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket; + time_bucket | count +-------------+------- + 0 | 4 + 8 | 4 +(2 rows) + +INSERT INTO space_table VALUES (3, 2, 1); +CALL refresh_continuous_aggregate('space_view', NULL, NULL); +SELECT * FROM space_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 8 | 4 +(2 rows) + +SELECT * FROM 
:"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket; + time_bucket | count +-------------+------- + 0 | 5 + 8 | 4 +(2 rows) + +INSERT INTO space_table VALUES (2, 3, 1); +CALL refresh_continuous_aggregate('space_view', NULL, NULL); +SELECT * FROM space_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 6 + 8 | 4 +(2 rows) + +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket; + time_bucket | count +-------------+------- + 0 | 6 + 8 | 4 +(2 rows) + +DROP TABLE space_table CASCADE; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to table _timescaledb_internal._hyper_25_60_chunk +-- +-- TEST FINALIZEFUNC_EXTRA +-- +-- create special aggregate to test ffunc_extra +-- Raise warning with the actual type being passed in +CREATE OR REPLACE FUNCTION fake_ffunc(a int8, b int, c int, d int, x anyelement) +RETURNS anyelement AS $$ +BEGIN + RAISE WARNING 'type % %', pg_typeof(d), pg_typeof(x); + RETURN x; +END; +$$ +LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION fake_sfunc(a int8, b int, c int, d int, x anyelement) +RETURNS int8 AS $$ +BEGIN + RETURN b; +END; $$ +LANGUAGE plpgsql; +CREATE AGGREGATE aggregate_to_test_ffunc_extra(int, int, int, anyelement) ( + SFUNC = fake_sfunc, + STYPE = int8, + COMBINEFUNC = int8pl, + FINALFUNC = fake_ffunc, + PARALLEL = SAFE, + FINALFUNC_EXTRA +); +CREATE TABLE conditions ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_conditions() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_conditions'); + set_integer_now_func +---------------------- + +(1 row) + +insert into conditions +select generate_series(0, 200, 10), 'POR', 55, 75, 40, 70, NULL; +CREATE MATERIALIZED VIEW mat_ffunc_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket(100, timec), aggregate_to_test_ffunc_extra(timec, 1, 3, 'test'::text) +from conditions +group by time_bucket(100, timec); +NOTICE: refreshing continuous aggregate "mat_ffunc_test" +WARNING: type integer text +WARNING: type integer text +WARNING: type integer text +SELECT * FROM mat_ffunc_test ORDER BY time_bucket; + time_bucket | aggregate_to_test_ffunc_extra +-------------+------------------------------- + 0 | + 100 | + 200 | +(3 rows) + +DROP MATERIALIZED view mat_ffunc_test; +NOTICE: drop cascades to table _timescaledb_internal._hyper_27_65_chunk +CREATE MATERIALIZED VIEW mat_ffunc_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select time_bucket(100, timec), aggregate_to_test_ffunc_extra(timec, 4, 5, bigint '123') +from conditions +group by time_bucket(100, timec); +NOTICE: refreshing continuous aggregate "mat_ffunc_test" +WARNING: type integer bigint +WARNING: type integer bigint +WARNING: type integer bigint +SELECT * FROM mat_ffunc_test ORDER BY time_bucket; + time_bucket | aggregate_to_test_ffunc_extra +-------------+------------------------------- + 0 | + 100 | + 200 | +(3 rows) + +--refresh mat view test when time_bucket is not projected -- +DROP MATERIALIZED VIEW mat_ffunc_test; +NOTICE: drop cascades to table _timescaledb_internal._hyper_28_66_chunk 
+CREATE MATERIALIZED VIEW mat_refresh_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions +group by time_bucket(100, timec), location WITH NO DATA; +insert into conditions +select generate_series(0, 50, 10), 'NYC', 55, 75, 40, 70, NULL; +CALL refresh_continuous_aggregate('mat_refresh_test', NULL, NULL); +SELECT * FROM mat_refresh_test order by 1,2 ; + location | max +----------+----- + NYC | 75 + POR | 75 + POR | 75 + POR | 75 +(4 rows) + +-- test for bug when group by is not in project list +CREATE MATERIALIZED VIEW conditions_grpby_view with (timescaledb.continuous, timescaledb.materialized_only=false) as +select time_bucket(100, timec), sum(humidity) +from conditions +group by time_bucket(100, timec), location; +NOTICE: refreshing continuous aggregate "conditions_grpby_view" +select * from conditions_grpby_view order by 1, 2; + time_bucket | sum +-------------+----- + 0 | 450 + 0 | 750 + 100 | 750 + 200 | 75 +(4 rows) + +CREATE MATERIALIZED VIEW conditions_grpby_view2 with (timescaledb.continuous, timescaledb.materialized_only=false) as +select time_bucket(100, timec), sum(humidity) +from conditions +group by time_bucket(100, timec), location +having avg(temperature) > 0; +NOTICE: refreshing continuous aggregate "conditions_grpby_view2" +select * from conditions_grpby_view2 order by 1, 2; + time_bucket | sum +-------------+----- + 0 | 450 + 0 | 750 + 100 | 750 + 200 | 75 +(4 rows) + +-- Test internal functions for continuous aggregates +SELECT test.continuous_aggs_find_view('mat_refresh_test'); + continuous_aggs_find_view +--------------------------- + +(1 row) + +-- Test pseudotype/enum handling +CREATE TYPE status_enum AS ENUM ( + 'red', + 'yellow', + 'green' +); +CREATE TABLE cagg_types ( + time TIMESTAMPTZ NOT NULL, + status status_enum, + names NAME[], + floats FLOAT[] +); +SELECT + table_name +FROM + create_hypertable('cagg_types', 'time'); + table_name +------------ + cagg_types +(1 row) + +INSERT INTO cagg_types +SELECT + '2000-01-01', + 'yellow', + '{foo,bar,baz}', + '{1,2.5,3}'; +CREATE MATERIALIZED VIEW mat_types WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1d', time), + min(status) AS status, + max(names) AS names, + min(floats) AS floats +FROM + cagg_types +GROUP BY + 1; +NOTICE: refreshing continuous aggregate "mat_types" +CALL refresh_continuous_aggregate('mat_types',NULL,NULL); +NOTICE: continuous aggregate "mat_types" is already up-to-date +SELECT * FROM mat_types; + time_bucket | status | names | floats +------------------------------+--------+---------------+----------- + Fri Dec 31 16:00:00 1999 PST | yellow | {foo,bar,baz} | {1,2.5,3} +(1 row) + +------------------------------------------------------------------------------------- +-- Test issue #2616 where cagg view contains an expression with several aggregates in it +CREATE TABLE water_consumption +( + sensor_id integer NOT NULL, + timestamp timestamp(0) NOT NULL, + water_index integer +); +SELECT create_hypertable('water_consumption', 'timestamp', 'sensor_id', 2); +WARNING: column type "timestamp without time zone" used for "timestamp" does not follow best practices + create_hypertable +--------------------------------- + (34,public,water_consumption,t) +(1 row) + +INSERT INTO public.water_consumption (sensor_id, timestamp, water_index) VALUES + (1, '2010-11-03 09:42:30', 1030), + (1, '2010-11-03 09:42:40', 1032), + (1, '2010-11-03 09:42:50', 1035), + (1, '2010-11-03 09:43:30', 1040), + (1,
'2010-11-03 09:43:40', 1045), + (1, '2010-11-03 09:43:50', 1050), + (1, '2010-11-03 09:44:30', 1052), + (1, '2010-11-03 09:44:40', 1057), + (1, '2010-11-03 09:44:50', 1060), + (1, '2010-11-03 09:45:30', 1063), + (1, '2010-11-03 09:45:40', 1067), + (1, '2010-11-03 09:45:50', 1070); +-- The test with the view originally reported in the issue. +CREATE MATERIALIZED VIEW water_consumption_aggregation_minute + WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT sensor_id, + time_bucket(INTERVAL '1 minute', timestamp) + '1 minute' AS timestamp, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +WITH NO DATA; +CALL refresh_continuous_aggregate('water_consumption_aggregation_minute', NULL, NULL); +-- The results of the view and the query over hypertable should be the same +SELECT * FROM water_consumption_aggregation_minute ORDER BY water_consumption; + sensor_id | timestamp | water_consumption +-----------+--------------------------+------------------- + 1 | Wed Nov 03 09:43:00 2010 | 5 + 1 | Wed Nov 03 09:46:00 2010 | 7 + 1 | Wed Nov 03 09:45:00 2010 | 8 + 1 | Wed Nov 03 09:44:00 2010 | 10 +(4 rows) + +SELECT sensor_id, + time_bucket(INTERVAL '1 minute', timestamp) + '1 minute' AS timestamp, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +ORDER BY water_consumption; + sensor_id | timestamp | water_consumption +-----------+--------------------------+------------------- + 1 | Wed Nov 03 09:43:00 2010 | 5 + 1 | Wed Nov 03 09:46:00 2010 | 7 + 1 | Wed Nov 03 09:45:00 2010 | 8 + 1 | Wed Nov 03 09:44:00 2010 | 10 +(4 rows) + +-- Simplified test, where the view doesn't contain all group by clauses +CREATE MATERIALIZED VIEW water_consumption_no_select_bucket + WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +WITH NO DATA; +CALL refresh_continuous_aggregate('water_consumption_no_select_bucket', NULL, NULL); +-- The results of the view and the query over hypertable should be the same +SELECT * FROM water_consumption_no_select_bucket ORDER BY water_consumption; + sensor_id | water_consumption +-----------+------------------- + 1 | 5 + 1 | 7 + 1 | 8 + 1 | 10 +(4 rows) + +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +ORDER BY water_consumption; + sensor_id | water_consumption +-----------+------------------- + 1 | 5 + 1 | 7 + 1 | 8 + 1 | 10 +(4 rows) + +-- The test with SELECT matching GROUP BY and placing aggregate expression not the last +CREATE MATERIALIZED VIEW water_consumption_aggregation_no_addition + WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) +AS +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption, + time_bucket(INTERVAL '1 minute', timestamp) AS timestamp +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +WITH NO DATA; +CALL refresh_continuous_aggregate('water_consumption_aggregation_no_addition', NULL, NULL); +-- The results of the view and the query over hypertable should be the same +SELECT * FROM water_consumption_aggregation_no_addition ORDER BY water_consumption; + 
sensor_id | water_consumption | timestamp +-----------+-------------------+-------------------------- + 1 | 5 | Wed Nov 03 09:42:00 2010 + 1 | 7 | Wed Nov 03 09:45:00 2010 + 1 | 8 | Wed Nov 03 09:44:00 2010 + 1 | 10 | Wed Nov 03 09:43:00 2010 +(4 rows) + +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption, + time_bucket(INTERVAL '1 minute', timestamp) AS timestamp +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +ORDER BY water_consumption; + sensor_id | water_consumption | timestamp +-----------+-------------------+-------------------------- + 1 | 5 | Wed Nov 03 09:42:00 2010 + 1 | 7 | Wed Nov 03 09:45:00 2010 + 1 | 8 | Wed Nov 03 09:44:00 2010 + 1 | 10 | Wed Nov 03 09:43:00 2010 +(4 rows) + +DROP TABLE water_consumption CASCADE; +NOTICE: drop cascades to 6 other objects +NOTICE: drop cascades to table _timescaledb_internal._hyper_35_73_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_36_74_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_37_75_chunk +---- +--- github issue 2655 --- +create table raw_data(time timestamptz, search_query text, cnt integer, cnt2 integer); +select create_hypertable('raw_data','time', chunk_time_interval=>'15 days'::interval); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------ + (38,public,raw_data,t) +(1 row) + +insert into raw_data select '2000-01-01','Q1'; +--having has exprs that appear in select +CREATE MATERIALIZED VIEW search_query_count_1m WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, + time_bucket(INTERVAL '1 minute', time) AS bucket + FROM raw_data + WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 + GROUP BY search_query, bucket HAVING count(search_query) > 3 OR sum(cnt) > 1; +NOTICE: refreshing continuous aggregate "search_query_count_1m" +--having has aggregates + grp by columns that appear in select +CREATE MATERIALIZED VIEW search_query_count_2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), + time_bucket(INTERVAL '1 minute', time) AS bucket + FROM raw_data + WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 + GROUP BY search_query, bucket +HAVING count(search_query) > 3 OR sum(cnt) > 1 OR + ( sum(cnt) + count(cnt)) > 1 + AND search_query = 'Q1'; +NOTICE: refreshing continuous aggregate "search_query_count_2" +CREATE MATERIALIZED VIEW search_query_count_3 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), + time_bucket(INTERVAL '1 minute', time) AS bucket + FROM raw_data + WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 + GROUP BY cnt +cnt2 , bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +NOTICE: refreshing continuous aggregate "search_query_count_3" +insert into raw_data select '2000-01-01 00:00+0','Q1', 1, 100; +insert into raw_data select '2000-01-01 00:00+0','Q1', 2, 200; +insert into raw_data select '2000-01-01 00:00+0','Q1', 3, 300; +insert into raw_data select '2000-01-02 00:00+0','Q2', 10, 10; +insert into raw_data select '2000-01-02 00:00+0','Q2', 20, 20; +CALL refresh_continuous_aggregate('search_query_count_1m', NULL, NULL); +SELECT * FROM search_query_count_1m ORDER BY 1, 2; + search_query | count | bucket 
+--------------+-------+------------------------------ + Q1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 2 | Sat Jan 01 16:00:00 2000 PST +(2 rows) + +--only 1 of these should appear in the result +insert into raw_data select '2000-01-02 00:00+0','Q3', 0, 0; +insert into raw_data select '2000-01-03 00:00+0','Q4', 20, 20; +CALL refresh_continuous_aggregate('search_query_count_1m', NULL, NULL); +SELECT * FROM search_query_count_1m ORDER BY 1, 2; + search_query | count | bucket +--------------+-------+------------------------------ + Q1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 2 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | Sun Jan 02 16:00:00 2000 PST +(3 rows) + +--refresh search_query_count_2--- +CALL refresh_continuous_aggregate('search_query_count_2', NULL, NULL); +SELECT * FROM search_query_count_2 ORDER BY 1, 2; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q1 | 3 | 6 | Fri Dec 31 16:00:00 1999 PST + Q2 | 2 | 30 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | 20 | Sun Jan 02 16:00:00 2000 PST +(3 rows) + +--refresh search_query_count_3--- +CALL refresh_continuous_aggregate('search_query_count_3', NULL, NULL); +SELECT * FROM search_query_count_3 ORDER BY 1, 2, 3; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q1 | 1 | 1 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 2 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 1 | 10 | Sat Jan 01 16:00:00 2000 PST + Q2 | 1 | 20 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | 20 | Sun Jan 02 16:00:00 2000 PST +(6 rows) + +--- TEST enable compression on continuous aggregates +CREATE VIEW cagg_compression_status as +SELECT ca.mat_hypertable_id AS mat_htid, + ca.user_view_name AS cagg_name , + h.schema_name AS mat_schema_name, + h.table_name AS mat_table_name, + ca.materialized_only +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +; +SELECT mat_htid AS "MAT_HTID" + , mat_schema_name || '.' 
|| mat_table_name AS "MAT_TABLE_NAME" +FROM cagg_compression_status +WHERE cagg_name = 'search_query_count_3' \gset +ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true'); +NOTICE: defaulting compress_segmentby to search_query +NOTICE: defaulting compress_orderby to bucket +SELECT cagg_name, mat_table_name +FROM cagg_compression_status where cagg_name = 'search_query_count_3'; + cagg_name | mat_table_name +----------------------+----------------------------- + search_query_count_3 | _materialized_hypertable_41 +(1 row) + +\x +SELECT * FROM timescaledb_information.compression_settings +WHERE hypertable_name = :'MAT_TABLE_NAME'; +-[ RECORD 1 ]----------+---------------------------- +hypertable_schema | _timescaledb_internal +hypertable_name | _materialized_hypertable_41 +attname | search_query +segmentby_column_index | 1 +orderby_column_index | +orderby_asc | +orderby_nullsfirst | +-[ RECORD 2 ]----------+---------------------------- +hypertable_schema | _timescaledb_internal +hypertable_name | _materialized_hypertable_41 +attname | bucket +segmentby_column_index | +orderby_column_index | 1 +orderby_asc | t +orderby_nullsfirst | f + +\x +SELECT compress_chunk(ch) +FROM show_chunks('search_query_count_3') ch; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_79_chunk +(1 row) + +SELECT * from search_query_count_3 ORDER BY 1, 2, 3; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q1 | 1 | 1 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 2 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 1 | 10 | Sat Jan 01 16:00:00 2000 PST + Q2 | 1 | 20 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | 20 | Sun Jan 02 16:00:00 2000 PST +(6 rows) + +-- insert into a new region of the hypertable and then refresh the cagg +-- (note: we still do not support refreshes into existing regions; +-- cagg chunks do not map 1-1 to hypertable regions, they encompass +-- more data).
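+-- Editorial note: an illustrative sketch (added for clarity; output not recorded)
+-- showing one way to compare cagg chunk boundaries with the raw hypertable's chunks,
+-- which makes it visible that a single cagg chunk spans a wider time range:
+SELECT hypertable_name, chunk_name, range_start, range_end, is_compressed
+FROM timescaledb_information.chunks
+WHERE hypertable_name IN ('raw_data', :'MAT_TABLE_NAME')
+ORDER BY hypertable_name, range_start;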
+insert into raw_data select '2000-05-01 00:00+0','Q3', 0, 0; +-- On PG >= 14 the refresh test below will pass because we added support for UPDATE/DELETE on compressed chunks in PR #5339 +\set ON_ERROR_STOP 0 +CALL refresh_continuous_aggregate('search_query_count_3', NULL, '2000-06-01 00:00+0'::timestamptz); +CALL refresh_continuous_aggregate('search_query_count_3', '2000-05-01 00:00+0'::timestamptz, '2000-06-01 00:00+0'::timestamptz); +NOTICE: continuous aggregate "search_query_count_3" is already up-to-date +\set ON_ERROR_STOP 1 +--insert row +insert into raw_data select '2001-05-10 00:00+0','Q3', 100, 100; +--this should succeed since it does not refresh any compressed regions in the cagg +CALL refresh_continuous_aggregate('search_query_count_3', '2001-05-01 00:00+0'::timestamptz, '2001-06-01 00:00+0'::timestamptz); +--verify watermark and check that chunks are compressed +SELECT _timescaledb_functions.to_timestamp(w) FROM _timescaledb_functions.cagg_watermark(:'MAT_HTID') w; + to_timestamp +------------------------------ + Wed May 09 17:01:00 2001 PDT +(1 row) + +SELECT chunk_name, range_start, range_end, is_compressed +FROM timescaledb_information.chunks +WHERE hypertable_name = :'MAT_TABLE_NAME' +ORDER BY 1; + chunk_name | range_start | range_end | is_compressed +--------------------+------------------------------+------------------------------+--------------- + _hyper_41_79_chunk | Fri Dec 24 16:00:00 1999 PST | Mon May 22 17:00:00 2000 PDT | t + _hyper_41_83_chunk | Sun Mar 18 16:00:00 2001 PST | Wed Aug 15 17:00:00 2001 PDT | f +(2 rows) + +SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log +WHERE materialization_id = :'MAT_HTID' ORDER BY 1, 2,3; + materialization_id | lowest_modified_value | greatest_modified_value +--------------------+-----------------------+------------------------- + 41 | -9223372036854775808 | -210866803200000001 + 41 | 959817600000000 | 988675199999999 + 41 | 991353600000000 | 9223372036854775807 +(3 rows) + +SELECT * from search_query_count_3 +WHERE bucket > '2001-01-01' +ORDER BY 1, 2, 3; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q3 | 1 | 100 | Wed May 09 17:00:00 2001 PDT +(1 row) + +--now disable compression , will error out -- +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'false'); +ERROR: cannot change configuration on already compressed chunks +\set ON_ERROR_STOP 1 +SELECT decompress_chunk(schema_name || '.' 
|| table_name) +FROM _timescaledb_catalog.chunk +WHERE hypertable_id = :'MAT_HTID' and status = 1; + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_79_chunk +(1 row) + +--disable compression on cagg after decompressing all chunks-- +ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'false'); +SELECT cagg_name, mat_table_name +FROM cagg_compression_status where cagg_name = 'search_query_count_3'; + cagg_name | mat_table_name +----------------------+----------------------------- + search_query_count_3 | _materialized_hypertable_41 +(1 row) + +SELECT view_name, materialized_only, compression_enabled +FROM timescaledb_information.continuous_aggregates +where view_name = 'search_query_count_3'; + view_name | materialized_only | compression_enabled +----------------------+-------------------+--------------------- + search_query_count_3 | f | f +(1 row) + +-- TEST caggs on table with more columns than in the cagg view defn -- +CREATE TABLE test_morecols ( time TIMESTAMPTZ NOT NULL, + val1 INTEGER, val2 INTEGER, val3 INTEGER, val4 INTEGER, + val5 INTEGER, val6 INTEGER, val7 INTEGER, val8 INTEGER); +SELECT create_hypertable('test_morecols', 'time', chunk_time_interval=> '7 days'::interval); + create_hypertable +----------------------------- + (43,public,test_morecols,t) +(1 row) + +INSERT INTO test_morecols +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 55, 75, 40, 70, NULL, 100, 200, 200; +CREATE MATERIALIZED VIEW test_morecols_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('30 days',time), avg(val1), count(val2) + FROM test_morecols GROUP BY 1; +NOTICE: refreshing continuous aggregate "test_morecols_cagg" +ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true'); +NOTICE: defaulting compress_orderby to time_bucket +SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_44_89_chunk +(1 row) + +SELECT * FROM test_morecols_cagg ORDER BY time_bucket; + time_bucket | avg | count +------------------------------+---------------------+------- + Fri Nov 23 16:00:00 2018 PST | 55.0000000000000000 | 23 + Sun Dec 23 16:00:00 2018 PST | 55.0000000000000000 | 8 +(2 rows) + +SELECT view_name, materialized_only, compression_enabled +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_morecols_cagg'; + view_name | materialized_only | compression_enabled +--------------------+-------------------+--------------------- + test_morecols_cagg | f | t +(1 row) + +--should keep compressed option, modify only materialized -- +ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.materialized_only='true'); +SELECT view_name, materialized_only, compression_enabled +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_morecols_cagg'; + view_name | materialized_only | compression_enabled +--------------------+-------------------+--------------------- + test_morecols_cagg | t | t +(1 row) + +CREATE TABLE issue3248(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON issue3248(time DESC); +CREATE INDEX ON issue3248(device_id,time DESC); +SELECT create_hypertable('issue3248','time',create_default_indexes:=false); + create_hypertable +------------------------- + (46,public,issue3248,t) +(1 row) + +ALTER 
TABLE issue3248 DROP COLUMN filler_1; +INSERT INTO issue3248(time,device_id,v0,v1,v2,v3) +SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL +FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','8h') gtime(time), + generate_series(1,5,1) gdevice(device_id); +ALTER TABLE issue3248 DROP COLUMN filler_2; +INSERT INTO issue3248(time,device_id,v0,v1,v2,v3) +SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL +FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','8h') gtime(time), + generate_series(1,5,1) gdevice(device_id); +ALTER TABLE issue3248 DROP COLUMN filler_3; +INSERT INTO issue3248(time,device_id,v0,v1,v2,v3) +SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL +FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','8h') gtime(time), + generate_series(1,5,1) gdevice(device_id); +ANALYZE issue3248; +CREATE materialized view issue3248_cagg WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS SELECT time_bucket('1h',time), device_id, min(v0), max(v1), avg(v2) +FROM issue3248 GROUP BY 1,2; +NOTICE: refreshing continuous aggregate "issue3248_cagg" +SELECT + FROM issue3248 AS m, + LATERAL(SELECT m FROM issue3248_cagg WHERE avg IS NULL LIMIT 1) AS lat; +-- +(0 rows) + +-- test that option create_group_indexes is taken into account +CREATE TABLE test_group_idx ( +time timestamptz, +symbol int, +value numeric +); +select create_hypertable('test_group_idx', 'time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------ + (48,public,test_group_idx,t) +(1 row) + +insert into test_group_idx +select t, round(random()*10), random()*5 +from generate_series('2020-01-01', '2020-02-25', INTERVAL '12 hours') t; +create materialized view cagg_index_true +with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.create_group_indexes=true) as +select + time_bucket('1 day', "time") as bucket, + sum(value), + symbol +from test_group_idx +group by bucket, symbol; +NOTICE: refreshing continuous aggregate "cagg_index_true" +create materialized view cagg_index_false +with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.create_group_indexes=false) as +select + time_bucket('1 day', "time") as bucket, + sum(value), + symbol +from test_group_idx +group by bucket, symbol; +NOTICE: refreshing continuous aggregate "cagg_index_false" +create materialized view cagg_index_default +with (timescaledb.continuous, timescaledb.materialized_only=false) as +select + time_bucket('1 day', "time") as bucket, + sum(value), + symbol +from test_group_idx +group by bucket, symbol; +NOTICE: refreshing continuous aggregate "cagg_index_default" +-- see corresponding materialization_hypertables +select view_name, materialization_hypertable_name from timescaledb_information.continuous_aggregates ca +where view_name like 'cagg_index_%'; + view_name | materialization_hypertable_name +--------------------+--------------------------------- + cagg_index_default | _materialized_hypertable_51 + cagg_index_false | _materialized_hypertable_50 + cagg_index_true | _materialized_hypertable_49 +(3 rows) + +-- now make sure a group index has been created when explicitly asked for +\x on +select i.* +from pg_indexes i +join pg_class c + on schemaname = relnamespace::regnamespace::text + and tablename = relname +where tablename in (select materialization_hypertable_name from 
timescaledb_information.continuous_aggregates +where view_name like 'cagg_index_%') +order by tablename; +-[ RECORD 1 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_49 +indexname | _materialized_hypertable_49_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_49_bucket_idx ON _timescaledb_internal._materialized_hypertable_49 USING btree (bucket DESC) +-[ RECORD 2 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_49 +indexname | _materialized_hypertable_49_symbol_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_49_symbol_bucket_idx ON _timescaledb_internal._materialized_hypertable_49 USING btree (symbol, bucket DESC) +-[ RECORD 3 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_50 +indexname | _materialized_hypertable_50_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_50_bucket_idx ON _timescaledb_internal._materialized_hypertable_50 USING btree (bucket DESC) +-[ RECORD 4 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_51 +indexname | _materialized_hypertable_51_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_51_bucket_idx ON _timescaledb_internal._materialized_hypertable_51 USING btree (bucket DESC) +-[ RECORD 5 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_51 +indexname | _materialized_hypertable_51_symbol_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_51_symbol_bucket_idx ON _timescaledb_internal._materialized_hypertable_51 USING btree (symbol, bucket DESC) + +\x off +-- +-- TESTs for removing old CAggs restrictions +-- +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 8 other objects +NOTICE: drop cascades to table _timescaledb_internal._hyper_29_67_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_30_68_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_31_69_chunk +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL +); +SELECT create_hypertable('conditions', 'timec'); + create_hypertable +-------------------------- + (52,public,conditions,t) +(1 row) + +INSERT INTO conditions +VALUES + ('2010-01-01 09:00:00-08', 'SFO', 55, 45), + ('2010-01-02 09:00:00-08', 'por', 100, 100), + ('2010-01-02 09:00:00-08', 'NYC', 65, 45), + ('2010-01-02 09:00:00-08', 'SFO', 65, 45), + ('2010-01-03 09:00:00-08', 'NYC', 45, 55), + ('2010-01-05 09:00:00-08', 'SFO', 75, 100), + ('2018-11-01 09:00:00-08', 'NYC', 45, 35), + ('2018-11-02 09:00:00-08', 'NYC', 35, 15), + ('2018-11-03 09:00:00-08', 'NYC', 35, 25); +-- aggregate with DISTINCT +CREATE MATERIALIZED VIEW mat_m1 WITH 
(timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1week', timec), + COUNT(location), + SUM(DISTINCT temperature) +FROM conditions +GROUP BY time_bucket('1week', timec), location; +NOTICE: refreshing continuous aggregate "mat_m1" +SELECT * FROM mat_m1 ORDER BY 1, 2, 3; + time_bucket | count | sum +------------------------------+-------+----- + Sun Dec 27 16:00:00 2009 PST | 1 | 100 + Sun Dec 27 16:00:00 2009 PST | 2 | 110 + Sun Dec 27 16:00:00 2009 PST | 2 | 120 + Sun Jan 03 16:00:00 2010 PST | 1 | 75 + Sun Oct 28 17:00:00 2018 PDT | 3 | 80 +(5 rows) + +-- aggregate with FILTER +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1week', timec), + SUM(temperature) FILTER (WHERE humidity > 60) +FROM conditions +GROUP BY time_bucket('1week', timec), location; +NOTICE: refreshing continuous aggregate "mat_m1" +SELECT * FROM mat_m1 ORDER BY 1, 2; + time_bucket | sum +------------------------------+----- + Sun Dec 27 16:00:00 2009 PST | 100 + Sun Dec 27 16:00:00 2009 PST | + Sun Dec 27 16:00:00 2009 PST | + Sun Jan 03 16:00:00 2010 PST | 75 + Sun Oct 28 17:00:00 2018 PDT | +(5 rows) + +-- aggregate with filter in having clause +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1week', timec), + MAX(temperature) +FROM conditions +GROUP BY time_bucket('1week', timec), location +HAVING SUM(temperature) FILTER (WHERE humidity > 40) > 50; +NOTICE: refreshing continuous aggregate "mat_m1" +SELECT * FROM mat_m1 ORDER BY 1, 2; + time_bucket | max +------------------------------+----- + Sun Dec 27 16:00:00 2009 PST | 65 + Sun Dec 27 16:00:00 2009 PST | 65 + Sun Dec 27 16:00:00 2009 PST | 100 + Sun Jan 03 16:00:00 2010 PST | 75 +(4 rows) + +-- ordered set aggr +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_55_116_chunk +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1week', timec), + MODE() WITHIN GROUP(ORDER BY humidity) +FROM conditions +GROUP BY time_bucket('1week', timec); +NOTICE: refreshing continuous aggregate "mat_m1" +SELECT * FROM mat_m1 ORDER BY 1; + time_bucket | mode +------------------------------+------ + Sun Dec 27 16:00:00 2009 PST | 45 + Sun Jan 03 16:00:00 2010 PST | 100 + Sun Oct 28 17:00:00 2018 PDT | 15 +(3 rows) + +-- hypothetical-set aggr +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1week', timec), + RANK(60) WITHIN GROUP (ORDER BY humidity), + DENSE_RANK(60) WITHIN GROUP (ORDER BY humidity), + PERCENT_RANK(60) WITHIN GROUP (ORDER BY humidity) +FROM conditions +GROUP BY time_bucket('1week', timec); +NOTICE: refreshing continuous aggregate "mat_m1" +SELECT * FROM mat_m1 ORDER BY 1; + time_bucket | rank | dense_rank | percent_rank +------------------------------+------+------------+-------------- + Sun Dec 27 16:00:00 2009 PST | 5 | 3 | 0.8 + Sun Jan 03 16:00:00 2010 PST | 1 | 1 | 0 + Sun Oct 28 17:00:00 2018 PDT | 4 | 4 | 1 +(3 rows) + +-- userdefined aggregate without combine function +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +CREATE AGGREGATE 
newavg ( + sfunc = int4_avg_accum, + basetype = int4, + stype = _int8, + finalfunc = int8_avg, + initcond1 = '{0,0}' +); +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + SUM(humidity), + round(newavg(temperature::int4)) +FROM conditions +GROUP BY time_bucket('1week', timec), location; +NOTICE: refreshing continuous aggregate "mat_m1" +SELECT * FROM mat_m1 ORDER BY 1, 2; + sum | round +-----+------- + 75 | 38 + 90 | 60 + 100 | 55 + 100 | 75 + 100 | 100 +(5 rows) + +-- ORDER BY in the view definition +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1week', timec), + COUNT(location), + SUM(temperature) +FROM conditions +GROUP BY time_bucket('1week', timec) +ORDER BY sum DESC; +NOTICE: refreshing continuous aggregate "mat_m1" +-- CAgg definition for realtime +SELECT pg_get_viewdef('mat_m1',true); + pg_get_viewdef +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + ( SELECT _materialized_hypertable_59.time_bucket, + + _materialized_hypertable_59.count, + + _materialized_hypertable_59.sum + + FROM _timescaledb_internal._materialized_hypertable_59 + + WHERE _materialized_hypertable_59.time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)+ + ORDER BY _materialized_hypertable_59.sum DESC) + + UNION ALL + + ( SELECT time_bucket('@ 7 days'::interval, conditions.timec) AS time_bucket, + + count(conditions.location) AS count, + + sum(conditions.temperature) AS sum + + FROM conditions + + WHERE conditions.timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone) + + GROUP BY (time_bucket('@ 7 days'::interval, conditions.timec)) + + ORDER BY (sum(conditions.temperature)) DESC) + + ORDER BY 3 DESC; +(1 row) + +-- Ordered result +SELECT * FROM mat_m1; + time_bucket | count | sum +------------------------------+-------+----- + Sun Dec 27 16:00:00 2009 PST | 5 | 330 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Jan 03 16:00:00 2010 PST | 1 | 75 +(3 rows) + +-- Insert new data and query again to make sure we produce ordered data +INSERT INTO conditions VALUES ('2018-11-10 09:00:00-08', 'SFO', 10, 10); +SELECT * FROM mat_m1; + time_bucket | count | sum +------------------------------+-------+----- + Sun Dec 27 16:00:00 2009 PST | 5 | 330 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Jan 03 16:00:00 2010 PST | 1 | 75 + Sun Nov 04 16:00:00 2018 PST | 1 | 10 +(4 rows) + +-- This new row will change the order again +INSERT INTO conditions VALUES ('2018-11-11 09:00:00-08', 'SFO', 400, 400); +SELECT * FROM mat_m1; + time_bucket | count | sum +------------------------------+-------+----- + Sun Nov 04 16:00:00 2018 PST | 2 | 410 + Sun Dec 27 16:00:00 2009 PST | 5 | 330 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Jan 03 16:00:00 2010 PST | 1 | 75 +(4 rows) + +-- Merge Append +EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append + Sort Key: _materialized_hypertable_59.sum DESC + -> Custom Scan 
(ConstraintAwareAppend) + Hypertable: _materialized_hypertable_59 + Chunks excluded during startup: 0 + -> Merge Append + Sort Key: _hyper_59_123_chunk.sum DESC + -> Index Scan Backward using _hyper_59_123_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_123_chunk + Index Cond: (time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) + -> Index Scan Backward using _hyper_59_124_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_124_chunk + Index Cond: (time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) + -> Sort + Sort Key: (sum(conditions.temperature)) DESC + -> HashAggregate + Group Key: time_bucket('@ 7 days'::interval, conditions.timec) + -> Result + -> Custom Scan (ChunkAppend) on conditions + Chunks excluded during startup: 1 + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) +(22 rows) + +-- Ordering by another column +SELECT * FROM mat_m1 ORDER BY count; + time_bucket | count | sum +------------------------------+-------+----- + Sun Jan 03 16:00:00 2010 PST | 1 | 75 + Sun Nov 04 16:00:00 2018 PST | 2 | 410 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Dec 27 16:00:00 2009 PST | 5 | 330 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _materialized_hypertable_59.count + -> Merge Append + Sort Key: _materialized_hypertable_59.sum DESC + -> Custom Scan (ConstraintAwareAppend) + Hypertable: _materialized_hypertable_59 + Chunks excluded during startup: 0 + -> Merge Append + Sort Key: _hyper_59_123_chunk.sum DESC + -> Index Scan Backward using _hyper_59_123_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_123_chunk + Index Cond: (time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) + -> Index Scan Backward using _hyper_59_124_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_124_chunk + Index Cond: (time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) + -> Sort + Sort Key: (sum(conditions.temperature)) DESC + -> HashAggregate + Group Key: time_bucket('@ 7 days'::interval, conditions.timec) + -> Result + -> Custom Scan (ChunkAppend) on conditions + Chunks excluded during startup: 1 + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 
COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(59)), '-infinity'::timestamp with time zone)) +(24 rows) + +-- Change the type of cagg +ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); +-- CAgg definition for materialized only +SELECT pg_get_viewdef('mat_m1',true); + pg_get_viewdef +----------------------------------------------------------- + SELECT time_bucket, + + count, + + sum + + FROM _timescaledb_internal._materialized_hypertable_59+ + ORDER BY sum DESC; +(1 row) + +-- Now the query will show only the materialized data, without the last two +-- records inserted into the original hypertable (the last two inserts above) +SELECT * FROM mat_m1; + time_bucket | count | sum +------------------------------+-------+----- + Sun Dec 27 16:00:00 2009 PST | 5 | 330 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Jan 03 16:00:00 2010 PST | 1 | 75 +(3 rows) + +-- Merge Append +EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Merge Append + Sort Key: _hyper_59_123_chunk.sum DESC + -> Index Scan Backward using _hyper_59_123_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_123_chunk + -> Index Scan Backward using _hyper_59_124_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_124_chunk +(4 rows) + +-- Ordering by another column +SELECT * FROM mat_m1 ORDER BY count; + time_bucket | count | sum +------------------------------+-------+----- + Sun Jan 03 16:00:00 2010 PST | 1 | 75 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Dec 27 16:00:00 2009 PST | 5 | 330 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Sort + Sort Key: _hyper_59_123_chunk.count + -> Merge Append + Sort Key: _hyper_59_123_chunk.sum DESC + -> Index Scan Backward using _hyper_59_123_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_123_chunk + -> Index Scan Backward using _hyper_59_124_chunk__materialized_hypertable_59_sum_time_bucket on _hyper_59_124_chunk +(6 rows) + +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +-- Invalidate old region and refresh again +DELETE FROM conditions WHERE timec < '2010-01-05 09:00:00-08'; +CALL refresh_continuous_aggregate('mat_m1', NULL, NULL); +-- Querying the cagg produces ordered records as expected +SELECT * FROM mat_m1; + time_bucket | count | sum +------------------------------+-------+----- + Sun Nov 04 16:00:00 2018 PST | 2 | 410 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Jan 03 16:00:00 2010 PST | 1 | 75 +(3 rows) + +-- Querying the materialization hypertable directly doesn't +-- produce ordered records +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME"; + time_bucket | count | sum +------------------------------+-------+----- + Sun Jan 03 16:00:00 2010 PST | 1 | 75 + Sun Oct 28 17:00:00 2018 PDT | 3 | 115 + Sun Nov 04 16:00:00 2018 PST | 2 | 410 +(3 rows) + +-- +-- Testing the coexistence of both types of supported CAggs +-- over the same raw hypertable. +-- +-- . finalized = true: without chunk_id and partials +-- .
finalized = false: with chunk_id and partials +-- +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL +); +SELECT table_name FROM create_hypertable('conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +INSERT INTO conditions VALUES + ('2010-01-01 09:00:00-08', 'SFO', 55, 45), + ('2010-01-02 09:00:00-08', 'por', 100, 100), + ('2010-01-02 09:00:00-08', 'SFO', 65, 45), + ('2010-01-02 09:00:00-08', 'NYC', 65, 45), + ('2018-11-01 09:00:00-08', 'NYC', 45, 35), + ('2018-11-02 09:00:00-08', 'NYC', 35, 15); +CREATE MATERIALIZED VIEW conditions_summary_new(timec, minl, sumt, sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS +SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) +FROM conditions +GROUP BY time_bucket('1day', timec) WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_old(timec, minl, sumt, sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +AS +SELECT time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) +FROM conditions +GROUP BY time_bucket('1day', timec) WITH NO DATA; +\x ON +SELECT * +FROM timescaledb_information.continuous_aggregates +WHERE view_name IN ('conditions_summary_new', 'conditions_summary_old'); +-[ RECORD 1 ]---------------------+---------------------------------------------------------- +hypertable_schema | public +hypertable_name | conditions +view_schema | public +view_name | conditions_summary_new +view_owner | default_perm_user +materialized_only | t +compression_enabled | f +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_61 +view_definition | SELECT time_bucket('@ 1 day'::interval, timec) AS timec,+ + | min(location) AS minl, + + | sum(temperature) AS sumt, + + | sum(humidity) AS sumh + + | FROM conditions + + | GROUP BY (time_bucket('@ 1 day'::interval, timec)); +finalized | t +-[ RECORD 2 ]---------------------+---------------------------------------------------------- +hypertable_schema | public +hypertable_name | conditions +view_schema | public +view_name | conditions_summary_old +view_owner | default_perm_user +materialized_only | t +compression_enabled | f +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_62 +view_definition | SELECT time_bucket('@ 1 day'::interval, timec) AS timec,+ + | min(location) AS minl, + + | sum(temperature) AS sumt, + + | sum(humidity) AS sumh + + | FROM conditions + + | GROUP BY (time_bucket('@ 1 day'::interval, timec)); +finalized | f + +\x OFF +CALL refresh_continuous_aggregate('conditions_summary_new', NULL, NULL); +CALL refresh_continuous_aggregate('conditions_summary_old', NULL, NULL); +-- Check and compare number of returned rows +SELECT count(*) FROM conditions_summary_new +UNION +SELECT count(*) FROM conditions_summary_old; + count +------- + 4 +(1 row) + +-- Should return 4 rows that is the same number of rows above +SELECT * +FROM conditions_summary_new +NATURAL JOIN conditions_summary_old +ORDER BY timec; + timec | minl | sumt | sumh +------------------------------+------+------+------ + Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 + Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 + Wed Oct 31 17:00:00 2018 PDT | 
NYC | 45 | 35 + Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 +(4 rows) + +-- Parallel planning test for realtime Continuous Aggregate +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + temperature DOUBLE PRECISION NULL +); +SELECT table_name FROM create_hypertable('conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +INSERT INTO conditions +SELECT t, 10 FROM generate_series('2023-01-01 00:00-03'::timestamptz, '2023-12-31 23:59-03'::timestamptz, '1 hour'::interval) AS t; +CREATE MATERIALIZED VIEW conditions_daily WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1 day', timec), + SUM(temperature) +FROM conditions +GROUP BY 1 +ORDER BY 2 DESC; +NOTICE: refreshing continuous aggregate "conditions_daily" +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false); + set_config +------------ + on +(1 row) + +SET max_parallel_workers_per_gather = 4; +SET parallel_setup_cost = 0; +SET parallel_tuple_cost = 0; +-- Parallel planning +EXPLAIN (COSTS OFF, TIMING OFF) SELECT * FROM conditions_daily WHERE time_bucket >= '2023-07-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Merge Append + Sort Key: _materialized_hypertable_64.sum DESC + -> Sort + Sort Key: _materialized_hypertable_64.sum DESC + -> Custom Scan (ChunkAppend) on _materialized_hypertable_64 + Chunks excluded during startup: 0 + -> Index Scan using _hyper_64_185_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_185_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_64_189_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_189_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Index Scan using _hyper_64_190_chunk__materialized_hypertable_64_time_bucket_idx on _hyper_64_190_chunk + Index Cond: ((time_bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (time_bucket >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + -> Sort + Sort Key: (sum(conditions.temperature)) DESC + -> HashAggregate + Group Key: time_bucket('@ 1 day'::interval, conditions.timec) + -> Result + -> Custom Scan (ChunkAppend) on conditions + Chunks excluded during startup: 26 + -> Index Scan Backward using _hyper_63_184_chunk_conditions_timec_idx on _hyper_63_184_chunk + Index Cond: ((timec >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(64)), '-infinity'::timestamp with time zone)) AND (timec >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, timec) >= 'Sat Jul 01 00:00:00 2023 PDT'::timestamp with 
time zone) +(22 rows) + diff --git a/tsl/test/expected/continuous_aggs_deprecated-16.out b/tsl/test/expected/continuous_aggs_deprecated-16.out new file mode 100644 index 00000000000..dc5066a1c6c --- /dev/null +++ b/tsl/test/expected/continuous_aggs_deprecated-16.out @@ -0,0 +1,1992 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- initialize the bgw mock state to prevent the materialization workers from running +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION test.continuous_aggs_find_view(cagg REGCLASS) RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_test_continuous_agg_find_by_view_name' LANGUAGE C; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +-- remove any default jobs, e.g., telemetry so bgw_job isn't polluted +DELETE FROM _timescaledb_config.bgw_job; +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +--TEST1 --- +--basic test with count +create table foo (a integer, b integer, c integer); +select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" + table_name +------------ + foo +(1 row) + +insert into foo values( 3 , 16 , 20); +insert into foo values( 1 , 10 , 20); +insert into foo values( 1 , 11 , 20); +insert into foo values( 1 , 12 , 20); +insert into foo values( 1 , 13 , 20); +insert into foo values( 1 , 14 , 20); +insert into foo values( 2 , 14 , 20); +insert into foo values( 2 , 15 , 20); +insert into foo values( 2 , 16 , 20); +CREATE OR REPLACE FUNCTION integer_now_foo() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a), 0) FROM foo $$; +SELECT set_integer_now_func('foo', 'integer_now_foo'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_m1(a, countb) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select a, count(b) +from foo +group by time_bucket(1, a), a WITH NO DATA; +SELECT add_continuous_aggregate_policy('mat_m1', NULL, 2::integer, '12 h'::interval) AS job_id +\gset +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 
12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 2, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select a, _timescaledb_functions.partialize_agg(count(b)), +time_bucket(1, a) +,1 +from foo +group by time_bucket(1, a) , a ; +select * from mat_m1 order by a ; + a | countb +---+-------- + 1 | 5 + 2 | 3 + 3 | 1 +(3 rows) + +--check triggers on user hypertable -- +SET ROLE :ROLE_SUPERUSER; +select tgname, tgtype, tgenabled , relname from pg_trigger, pg_class +where tgrelid = pg_class.oid and pg_class.relname like 'foo' +order by tgname; + tgname | tgtype | tgenabled | relname +------------------------------+--------+-----------+--------- + ts_cagg_invalidation_trigger | 29 | O | foo + ts_insert_blocker | 7 | O | foo +(2 rows) + +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- TEST2 --- +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +SHOW enable_partitionwise_aggregate; + enable_partitionwise_aggregate +-------------------------------- + off +(1 row) + +SET enable_partitionwise_aggregate = on; +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions values ( '2010-01-01 09:00:00-08', 'SFO', 55, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'por', 100, 100); +insert into conditions values ( '2010-01-02 09:00:00-08', 'SFO', 65, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-11-01 09:00:00-08', 'NYC', 45, 35); +insert into conditions values ( '2018-11-02 09:00:00-08', 'NYC', 35, 15); +CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec) WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' 
+\gset +-- Materialized hypertable for mat_m1 should not be visible in the +-- hypertables view: +SELECT hypertable_schema, hypertable_name +FROM timescaledb_information.hypertables ORDER BY 1,2; + hypertable_schema | hypertable_name +-------------------+----------------- + public | conditions + public | foo +(2 rows) + +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1day', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)) +,1 +from conditions +group by time_bucket('1day', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumt, sumh +from mat_m1 +order by timec; + timec | minl | sumt | sumh +------------------------------+------+------+------ + Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 + Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 + Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 + Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 +(4 rows) + +select time_bucket('1day', timec), min(location), sum(temperature), sum(humidity) +from conditions +group by time_bucket('1day', timec) +order by 1; + time_bucket | min | sum | sum +------------------------------+-----+-----+----- + Thu Dec 31 16:00:00 2009 PST | SFO | 55 | 45 + Fri Jan 01 16:00:00 2010 PST | NYC | 230 | 190 + Wed Oct 31 17:00:00 2018 PDT | NYC | 45 | 35 + Thu Nov 01 17:00:00 2018 PDT | NYC | 35 | 15 +(4 rows) + +SET enable_partitionwise_aggregate = off; +-- TEST3 -- +-- drop on table conditions should cascade to materialized mat_v1 +drop table conditions cascade; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions values ( '2010-01-01 09:00:00-08', 'SFO', 55, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'por', 100, 100); +insert into conditions values ( '2010-01-02 09:00:00-08', 'NYC', 65, 45); +insert into conditions values ( '2010-01-02 09:00:00-08', 'SFO', 65, 45); +insert into conditions values ( '2010-01-03 09:00:00-08', 'NYC', 45, 55); +insert into conditions values ( '2010-01-05 09:00:00-08', 'SFO', 75, 100); +insert into conditions values ( '2018-11-01 09:00:00-08', 'NYC', 45, 35); +insert into conditions values ( '2018-11-02 09:00:00-08', 'NYC', 35, 15); +insert into conditions values ( '2018-11-03 09:00:00-08', 'NYC', 35, 25); +CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), 
_timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity)) +,1 +from conditions +group by time_bucket('1week', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumth, stddevh +from mat_m1 +order by timec; + timec | minl | sumth | stddevh +------------------------------+------+-------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 + Sun Jan 03 16:00:00 2010 PST | SFO | 175 | + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(3 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+ sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +order by time_bucket('1week', timec); + time_bucket | min | ?column? | stddev +------------------------------+-----+----------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 + Sun Jan 03 16:00:00 2010 PST | SFO | 175 | + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(3 rows) + +-- TEST4 -- +--materialized view with group by clause + expression in SELECT +-- use previous data from conditions +--drop only the view. +-- apply where clause on result of mat_m1 -- +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +CREATE MATERIALIZED VIEW mat_m1( timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +where location = 'NYC' +group by time_bucket('1week', timec) + WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity)) +,1 +from conditions +where location = 'NYC' +group by time_bucket('1week', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumth, stddevh +from mat_m1 +where stddevh is not null +order by timec; + timec | minl | sumth | stddevh +------------------------------+------+-------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 210 | 7.07106781186548 + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(2 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+ sum(humidity), stddev(humidity) +from conditions +where location = 'NYC' +group by time_bucket('1week', timec) +order by time_bucket('1week', timec); + time_bucket | min | ?column? 
| stddev +------------------------------+-----+----------+------------------ + Sun Dec 27 16:00:00 2009 PST | NYC | 210 | 7.07106781186548 + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 +(2 rows) + +-- TEST5 -- +---------test with having clause ---------------------- +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +create materialized view mat_m1( timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having stddev(humidity) is not null WITH NO DATA; +; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity)) +,1 +from conditions +group by time_bucket('1week', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- should have same results -- +select * from mat_m1 +order by sumth; + timec | minl | sumth | stddevh +------------------------------+------+-------+------------------ + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 +(2 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having stddev(humidity) is not null +order by sum(temperature)+sum(humidity); + time_bucket | min | ?column? 
| stddev +------------------------------+-----+----------+------------------ + Sun Oct 28 17:00:00 2018 PDT | NYC | 190 | 10 + Sun Dec 27 16:00:00 2009 PST | NYC | 620 | 23.8746727726266 +(2 rows) + +-- TEST6 -- +--group by with more than 1 group column +-- having clause with a mix of columns from select list + others +drop table conditions cascade; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp numeric NULL, + highp numeric null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, 71, 28; +--naming with AS clauses +CREATE MATERIALIZED VIEW mat_naming +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1week', timec) as bucket, location as loc, sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by bucket, loc +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_naming' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+---------- + 1 | bucket + 2 | loc + 3 | agg_3_3 + 4 | agg_3_4 + 5 | agg_4_5 + 6 | agg_0_6 + 7 | agg_0_7 + 8 | chunk_id +(8 rows) + +DROP MATERIALIZED VIEW mat_naming; +--naming with default names +CREATE MATERIALIZED VIEW mat_naming +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1week', timec), location, sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by 1,2 +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_naming' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+------------- + 1 | time_bucket + 2 | location + 3 | agg_3_3 + 4 | agg_3_4 + 5 | agg_4_5 + 6 | agg_0_6 + 7 | agg_0_7 + 8 | chunk_id +(8 rows) + +DROP MATERIALIZED VIEW mat_naming; +--naming with view col names +CREATE MATERIALIZED VIEW mat_naming(bucket, loc, sum_t_h, stdd) +WITH 
(timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1week', timec), location, sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by 1,2 +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_naming' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+---------- + 1 | bucket + 2 | loc + 3 | agg_3_3 + 4 | agg_3_4 + 5 | agg_4_5 + 6 | agg_0_6 + 7 | agg_0_7 + 8 | chunk_id +(8 rows) + +DROP MATERIALIZED VIEW mat_naming; +CREATE MATERIALIZED VIEW mat_m1(timec, minl, sumth, stddevh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having min(location) >= 'NYC' and avg(temperature) > 20 WITH NO DATA; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_m1' +\gset +select attnum , attname from pg_attribute +where attnum > 0 and attrelid = +(Select oid from pg_class where relname like :'MAT_TABLE_NAME') +order by attnum, attname; + attnum | attname +--------+---------- + 1 | timec + 2 | agg_2_2 + 3 | agg_3_3 + 4 | agg_3_4 + 5 | agg_4_5 + 6 | agg_0_6 + 7 | chunk_id +(7 rows) + +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select + time_bucket('1week', timec), _timescaledb_functions.partialize_agg( min(location)), _timescaledb_functions.partialize_agg( sum(temperature)) , _timescaledb_functions.partialize_agg( sum(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity)) +,_timescaledb_functions.partialize_agg( avg(temperature)) +,1 +from conditions +group by time_bucket('1week', timec) ; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--should have same results -- +select timec, minl, sumth, stddevh +from mat_m1 +order by timec, minl; + timec | minl | sumth | stddevh +------------------------------+------+-------+------------------ + Sun Dec 16 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 23 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 30 16:00:00 2018 PST | NYC | 210 | 21.2132034355964 +(3 rows) + +select time_bucket('1week', timec) , +min(location), sum(temperature)+sum(humidity), stddev(humidity) +from conditions +group by time_bucket('1week', timec) +having min(location) >= 'NYC' and avg(temperature) > 20 and avg(lowp) > 10 +order by time_bucket('1week', timec), min(location); + time_bucket | min | ?column? 
| stddev +------------------------------+-----+----------+------------------ + Sun Dec 16 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 23 16:00:00 2018 PST | NYC | 1470 | 15.5662356498831 + Sun Dec 30 16:00:00 2018 PST | NYC | 210 | 21.2132034355964 +(3 rows) + +--check view defintion in information views +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like 'mat_m1'; + view_name | view_definition +-----------+------------------------------------------------------------------------------------------- + mat_m1 | SELECT time_bucket('@ 7 days'::interval, timec) AS timec, + + | min(location) AS minl, + + | (sum(temperature) + sum(humidity)) AS sumth, + + | stddev(humidity) AS stddevh + + | FROM conditions + + | GROUP BY (time_bucket('@ 7 days'::interval, timec)) + + | HAVING ((min(location) >= 'NYC'::text) AND (avg(temperature) > (20)::double precision)); +(1 row) + +--TEST6 -- select from internal view +SET ROLE :ROLE_SUPERUSER; +insert into :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" +select * from :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME"; +SET ROLE :ROLE_DEFAULT_PERM_USER; +--lets drop the view and check +DROP MATERIALIZED VIEW mat_m1; +NOTICE: drop cascades to 2 other objects +drop table conditions; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70, NULL; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40, NULL; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; +SELECT + $$ + select time_bucket('1week', timec) , + min(location) as col1, sum(temperature)+sum(humidity) as col2, stddev(humidity) as col3, min(allnull) as col4 + from conditions + group by time_bucket('1week', timec) + having min(location) >= 'NYC' and avg(temperature) > 20 + $$ AS "QUERY" +\gset +\set ECHO errors +psql:include/cont_agg_equal_deprecated.sql:8: NOTICE: materialized view "mat_test" does not exist, skipping + ?column? | count +---------------------------------------------------------------+------- + Number of rows different between view and original (expect 0) | 0 +(1 row) + +SELECT + $$ + select time_bucket('1week', timec), location, + sum(temperature)+sum(humidity) as col2, stddev(humidity) as col3, min(allnull) as col4 + from conditions + group by location, time_bucket('1week', timec) + $$ AS "QUERY" +\gset +\set ECHO errors +psql:include/cont_agg_equal_deprecated.sql:8: NOTICE: drop cascades to 2 other objects + ?column? 
| count +---------------------------------------------------------------+------- + Number of rows different between view and original (expect 0) | 0 +(1 row) + +--TEST7 -- drop tests for view and hypertable +--DROP tests +\set ON_ERROR_STOP 0 +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_test' +\gset +DROP TABLE :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME"; +ERROR: cannot drop table _timescaledb_internal._materialized_hypertable_16 because other objects depend on it +DROP VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME"; +ERROR: cannot drop the partial/direct view because it is required by a continuous aggregate +DROP VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME"; +ERROR: cannot drop the partial/direct view because it is required by a continuous aggregate +\set ON_ERROR_STOP 1 +--catalog entry still there; +SELECT count(*) +FROM _timescaledb_catalog.continuous_agg ca +WHERE user_view_name = 'mat_test'; + count +------- + 1 +(1 row) + +--mat table, user_view, direct view and partial view all there +select count(*) from pg_class where relname = :'PART_VIEW_NAME'; + count +------- + 1 +(1 row) + +select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; + count +------- + 1 +(1 row) + +select count(*) from pg_class where relname = :'DIR_VIEW_NAME'; + count +------- + 1 +(1 row) + +select count(*) from pg_class where relname = 'mat_test'; + count +------- + 1 +(1 row) + +DROP MATERIALIZED VIEW mat_test; +NOTICE: drop cascades to 2 other objects +--catalog entry should be gone +SELECT count(*) +FROM _timescaledb_catalog.continuous_agg ca +WHERE user_view_name = 'mat_test'; + count +------- + 0 +(1 row) + +--mat table, user_view, direct view and partial view all gone +select count(*) from pg_class where relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = :'DIR_VIEW_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = 'mat_test'; + count +------- + 0 +(1 row) + +--test dropping raw table +DROP TABLE conditions; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +--no data in hyper table on purpose so that CASCADE is not required because of chunks +CREATE MATERIALIZED VIEW mat_drop_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec) WITH NO DATA; +\set ON_ERROR_STOP 0 +DROP TABLE conditions; +ERROR: cannot drop table conditions because other objects depend on it +\set ON_ERROR_STOP 1 +--insert data now +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75, 40, 70, NULL; +insert 
into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 35, 45, 50, 40, NULL; +insert into conditions +select generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; +SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", + h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_drop_test' +\gset +SET client_min_messages TO NOTICE; +CALL refresh_continuous_aggregate('mat_drop_test', NULL, NULL); +--force invalidation +insert into conditions +select generate_series('2017-11-01 00:00'::timestamp, '2017-12-15 00:00'::timestamp, '1 day'), 'LA', 73, 55, NULL, 28, NULL; +select count(*) from _timescaledb_catalog.continuous_aggs_invalidation_threshold; + count +------- + 1 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log; + count +------- + 1 +(1 row) + +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 2 other objects +--catalog entry should be gone +SELECT count(*) +FROM _timescaledb_catalog.continuous_agg ca +WHERE user_view_name = 'mat_drop_test'; + count +------- + 0 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_invalidation_threshold; + count +------- + 0 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log; + count +------- + 0 +(1 row) + +select count(*) from _timescaledb_catalog.continuous_aggs_materialization_invalidation_log; + count +------- + 0 +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +--mat table, user_view, and partial view all gone +select count(*) from pg_class where relname = :'PART_VIEW_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = :'MAT_TABLE_NAME'; + count +------- + 0 +(1 row) + +select count(*) from pg_class where relname = 'mat_drop_test'; + count +------- + 0 +(1 row) + +--TEST With options +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, + timescaledb.materialized_only=true, + timescaledb.finalized=false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec), location, humidity, temperature WITH NO DATA; +SELECT add_continuous_aggregate_policy('mat_with_test', NULL, '5 h'::interval, '12 h'::interval); + 
add_continuous_aggregate_policy +--------------------------------- + 1001 +(1 row) + +SELECT alter_job(id, schedule_interval => '1h') FROM _timescaledb_config.bgw_job; + alter_job +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1001,"@ 1 hour","@ 0",-1,"@ 12 hours",t,"{""end_offset"": ""@ 5 hours"", ""start_offset"": null, ""mat_hypertable_id"": 20}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job; + schedule_interval +------------------- + @ 1 hour +(1 row) + +SELECT alter_job(id, schedule_interval => '2h') FROM _timescaledb_config.bgw_job; + alter_job +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1001,"@ 2 hours","@ 0",-1,"@ 12 hours",t,"{""end_offset"": ""@ 5 hours"", ""start_offset"": null, ""mat_hypertable_id"": 20}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job; + schedule_interval +------------------- + @ 2 hours +(1 row) + +select indexname, indexdef from pg_indexes where tablename = +(SELECT h.table_name +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_with_test') +order by indexname; + indexname | indexdef +-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------- + _materialized_hypertable_20_grp_5_5_timec_idx | CREATE INDEX _materialized_hypertable_20_grp_5_5_timec_idx ON _timescaledb_internal._materialized_hypertable_20 USING btree (grp_5_5, timec DESC) + _materialized_hypertable_20_grp_6_6_timec_idx | CREATE INDEX _materialized_hypertable_20_grp_6_6_timec_idx ON _timescaledb_internal._materialized_hypertable_20 USING btree (grp_6_6, timec DESC) + _materialized_hypertable_20_grp_7_7_timec_idx | CREATE INDEX _materialized_hypertable_20_grp_7_7_timec_idx ON _timescaledb_internal._materialized_hypertable_20 USING btree (grp_7_7, timec DESC) + _materialized_hypertable_20_timec_idx | CREATE INDEX _materialized_hypertable_20_timec_idx ON _timescaledb_internal._materialized_hypertable_20 USING btree (timec DESC) +(4 rows) + +DROP MATERIALIZED VIEW mat_with_test; +--no additional indexes +CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, + timescaledb.materialized_only=true, + timescaledb.create_group_indexes=false, + timescaledb.finalized=false) +as +select time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec), location, humidity, temperature WITH NO DATA; +select indexname, indexdef from pg_indexes where tablename = +(SELECT h.table_name +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'mat_with_test'); + indexname | indexdef +---------------------------------------+---------------------------------------------------------------------------------------------------------------------------------- + 
_materialized_hypertable_21_timec_idx | CREATE INDEX _materialized_hypertable_21_timec_idx ON _timescaledb_internal._materialized_hypertable_21 USING btree (timec DESC) +(1 row) + +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 2 other objects +--test WITH using a hypertable with an integer time dimension +CREATE TABLE conditions ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_conditions() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_conditions'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_with_test(timec, minl, sumt , sumh) +WITH (timescaledb.continuous, + timescaledb.materialized_only=true, + timescaledb.finalized=false) +as +select time_bucket(100, timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket(100, timec) WITH NO DATA; +SELECT add_continuous_aggregate_policy('mat_with_test', NULL, 500::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT alter_job(id, schedule_interval => '2h') FROM _timescaledb_config.bgw_job; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + (1002,"@ 2 hours","@ 0",-1,"@ 12 hours",t,"{""end_offset"": 500, ""start_offset"": null, ""mat_hypertable_id"": 23}",-infinity,_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT schedule_interval FROM _timescaledb_config.bgw_job; + schedule_interval +------------------- + @ 2 hours +(1 row) + +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to 2 other objects +--test space partitions +CREATE TABLE space_table ( + time BIGINT, + dev BIGINT, + data BIGINT +); +SELECT create_hypertable( + 'space_table', + 'time', + chunk_time_interval => 10, + partitioning_column => 'dev', + number_partitions => 3); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------- + (24,public,space_table,t) +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_space_table() returns BIGINT LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), BIGINT '0') FROM space_table $$; +SELECT set_integer_now_func('space_table', 'integer_now_space_table'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW space_view +WITH (timescaledb.continuous, + timescaledb.materialized_only=true, + timescaledb.finalized=false) +AS SELECT time_bucket('4', time), COUNT(data) + FROM space_table + GROUP BY 1 WITH NO DATA; +INSERT INTO space_table VALUES + (0, 1, 1), (0, 2, 1), (1, 1, 1), (1, 2, 1), + (10, 1, 1), (10, 2, 1), (11, 1, 1), (11, 2, 1); +SELECT h.schema_name AS "MAT_SCHEMA_NAME", + h.table_name AS "MAT_TABLE_NAME", + partial_view_name as "PART_VIEW_NAME", + partial_view_schema as "PART_VIEW_SCHEMA", + direct_view_name as "DIR_VIEW_NAME", + direct_view_schema as "DIR_VIEW_SCHEMA" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = 
ca.mat_hypertable_id) +WHERE user_view_name = 'space_view' +\gset +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket, chunk_id; + time_bucket | agg_2_2 | chunk_id +-------------+---------+---------- +(0 rows) + +CALL refresh_continuous_aggregate('space_view', NULL, NULL); +SELECT * FROM space_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 4 + 8 | 4 +(2 rows) + +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket, chunk_id; + time_bucket | agg_2_2 | chunk_id +-------------+--------------------+---------- + 0 | \x0000000000000002 | 58 + 0 | \x0000000000000002 | 59 + 8 | \x0000000000000002 | 60 + 8 | \x0000000000000002 | 61 +(4 rows) + +INSERT INTO space_table VALUES (3, 2, 1); +CALL refresh_continuous_aggregate('space_view', NULL, NULL); +SELECT * FROM space_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 8 | 4 +(2 rows) + +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket, chunk_id; + time_bucket | agg_2_2 | chunk_id +-------------+--------------------+---------- + 0 | \x0000000000000002 | 58 + 0 | \x0000000000000003 | 59 + 8 | \x0000000000000002 | 60 + 8 | \x0000000000000002 | 61 +(4 rows) + +INSERT INTO space_table VALUES (2, 3, 1); +CALL refresh_continuous_aggregate('space_view', NULL, NULL); +SELECT * FROM space_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 6 + 8 | 4 +(2 rows) + +SELECT * FROM :"MAT_SCHEMA_NAME".:"MAT_TABLE_NAME" + ORDER BY time_bucket, chunk_id; + time_bucket | agg_2_2 | chunk_id +-------------+--------------------+---------- + 0 | \x0000000000000002 | 58 + 0 | \x0000000000000003 | 59 + 0 | \x0000000000000001 | 63 + 8 | \x0000000000000002 | 60 + 8 | \x0000000000000002 | 61 +(5 rows) + +DROP TABLE space_table CASCADE; +NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to table _timescaledb_internal._hyper_25_62_chunk +-- +-- TEST FINALIZEFUNC_EXTRA +-- +-- create special aggregate to test ffunc_extra +-- Raise warning with the actual type being passed in +CREATE OR REPLACE FUNCTION fake_ffunc(a int8, b int, c int, d int, x anyelement) +RETURNS anyelement AS $$ +BEGIN + RAISE WARNING 'type % %', pg_typeof(d), pg_typeof(x); + RETURN x; +END; +$$ +LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION fake_sfunc(a int8, b int, c int, d int, x anyelement) +RETURNS int8 AS $$ +BEGIN + RETURN b; +END; $$ +LANGUAGE plpgsql; +CREATE AGGREGATE aggregate_to_test_ffunc_extra(int, int, int, anyelement) ( + SFUNC = fake_sfunc, + STYPE = int8, + COMBINEFUNC = int8pl, + FINALFUNC = fake_ffunc, + PARALLEL = SAFE, + FINALFUNC_EXTRA +); +CREATE TABLE conditions ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_conditions() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_conditions'); + set_integer_now_func +---------------------- + +(1 row) + +insert into conditions +select generate_series(0, 200, 10), 'POR', 55, 75, 40, 70, NULL; +CREATE MATERIALIZED VIEW mat_ffunc_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket(100, timec), 
aggregate_to_test_ffunc_extra(timec, 1, 3, 'test'::text) +from conditions +group by time_bucket(100, timec); +NOTICE: refreshing continuous aggregate "mat_ffunc_test" +SELECT * FROM mat_ffunc_test; +WARNING: type integer text +WARNING: type integer text +WARNING: type integer text + time_bucket | aggregate_to_test_ffunc_extra +-------------+------------------------------- + 200 | + 0 | + 100 | +(3 rows) + +DROP MATERIALIZED view mat_ffunc_test; +NOTICE: drop cascades to table _timescaledb_internal._hyper_27_67_chunk +CREATE MATERIALIZED VIEW mat_ffunc_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select time_bucket(100, timec), aggregate_to_test_ffunc_extra(timec, 4, 5, bigint '123') +from conditions +group by time_bucket(100, timec); +NOTICE: refreshing continuous aggregate "mat_ffunc_test" +SELECT * FROM mat_ffunc_test; +WARNING: type integer bigint +WARNING: type integer bigint +WARNING: type integer bigint + time_bucket | aggregate_to_test_ffunc_extra +-------------+------------------------------- + 200 | + 0 | + 100 | +(3 rows) + +--refresh mat view test when time_bucket is not projected -- +DROP MATERIALIZED VIEW mat_ffunc_test; +NOTICE: drop cascades to table _timescaledb_internal._hyper_28_68_chunk +CREATE MATERIALIZED VIEW mat_refresh_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) +as +select location, max(humidity) +from conditions +group by time_bucket(100, timec), location WITH NO DATA; +insert into conditions +select generate_series(0, 50, 10), 'NYC', 55, 75, 40, 70, NULL; +CALL refresh_continuous_aggregate('mat_refresh_test', NULL, NULL); +SELECT * FROM mat_refresh_test order by 1,2 ; + location | max +----------+----- + NYC | 75 + POR | 75 + POR | 75 + POR | 75 +(4 rows) + +-- test for bug when group by is not in project list +CREATE MATERIALIZED VIEW conditions_grpby_view with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) as +select time_bucket(100, timec), sum(humidity) +from conditions +group by time_bucket(100, timec), location; +NOTICE: refreshing continuous aggregate "conditions_grpby_view" +select * from conditions_grpby_view order by 1, 2; + time_bucket | sum +-------------+----- + 0 | 450 + 0 | 750 + 100 | 750 + 200 | 75 +(4 rows) + +CREATE MATERIALIZED VIEW conditions_grpby_view2 with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) as +select time_bucket(100, timec), sum(humidity) +from conditions +group by time_bucket(100, timec), location +having avg(temperature) > 0; +NOTICE: refreshing continuous aggregate "conditions_grpby_view2" +select * from conditions_grpby_view2 order by 1, 2; + time_bucket | sum +-------------+----- + 0 | 450 + 0 | 750 + 100 | 750 + 200 | 75 +(4 rows) + +-- Test internal functions for continuous aggregates +SELECT test.continuous_aggs_find_view('mat_refresh_test'); + continuous_aggs_find_view +--------------------------- + +(1 row) + +-- Test pseudotype/enum handling +CREATE TYPE status_enum AS ENUM ( + 'red', + 'yellow', + 'green' +); +CREATE TABLE cagg_types ( + time TIMESTAMPTZ NOT NULL, + status status_enum, + names NAME[], + floats FLOAT[] +); +SELECT + table_name +FROM + create_hypertable('cagg_types', 'time'); + table_name +------------ + cagg_types +(1 row) + +INSERT INTO cagg_types +SELECT + '2000-01-01', + 'yellow', + '{foo,bar,baz}', + '{1,2.5,3}'; +CREATE MATERIALIZED VIEW mat_types WITH (timescaledb.continuous, 
timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1d', time), + min(status) AS status, + max(names) AS names, + min(floats) AS floats +FROM + cagg_types +GROUP BY + 1; +NOTICE: refreshing continuous aggregate "mat_types" +CALL refresh_continuous_aggregate('mat_types',NULL,NULL); +NOTICE: continuous aggregate "mat_types" is already up-to-date +SELECT * FROM mat_types; + time_bucket | status | names | floats +------------------------------+--------+---------------+----------- + Fri Dec 31 16:00:00 1999 PST | yellow | {foo,bar,baz} | {1,2.5,3} +(1 row) + +------------------------------------------------------------------------------------- +-- Test issue #2616 where cagg view contains an experssion with several aggregates in +CREATE TABLE water_consumption +( + sensor_id integer NOT NULL, + timestamp timestamp(0) NOT NULL, + water_index integer +); +SELECT create_hypertable('water_consumption', 'timestamp', 'sensor_id', 2); +WARNING: column type "timestamp without time zone" used for "timestamp" does not follow best practices + create_hypertable +--------------------------------- + (34,public,water_consumption,t) +(1 row) + +INSERT INTO public.water_consumption (sensor_id, timestamp, water_index) VALUES + (1, '2010-11-03 09:42:30', 1030), + (1, '2010-11-03 09:42:40', 1032), + (1, '2010-11-03 09:42:50', 1035), + (1, '2010-11-03 09:43:30', 1040), + (1, '2010-11-03 09:43:40', 1045), + (1, '2010-11-03 09:43:50', 1050), + (1, '2010-11-03 09:44:30', 1052), + (1, '2010-11-03 09:44:40', 1057), + (1, '2010-11-03 09:44:50', 1060), + (1, '2010-11-03 09:45:30', 1063), + (1, '2010-11-03 09:45:40', 1067), + (1, '2010-11-03 09:45:50', 1070); +-- The test with the view originally reported in the issue. +CREATE MATERIALIZED VIEW water_consumption_aggregation_minute + WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized=false) +AS +SELECT sensor_id, + time_bucket(INTERVAL '1 minute', timestamp) + '1 minute' AS timestamp, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +WITH NO DATA; +CALL refresh_continuous_aggregate('water_consumption_aggregation_minute', NULL, NULL); +-- The results of the view and the query over hypertable should be the same +SELECT * FROM water_consumption_aggregation_minute ORDER BY water_consumption; + sensor_id | timestamp | water_consumption +-----------+--------------------------+------------------- + 1 | Wed Nov 03 09:43:00 2010 | 5 + 1 | Wed Nov 03 09:46:00 2010 | 7 + 1 | Wed Nov 03 09:45:00 2010 | 8 + 1 | Wed Nov 03 09:44:00 2010 | 10 +(4 rows) + +SELECT sensor_id, + time_bucket(INTERVAL '1 minute', timestamp) + '1 minute' AS timestamp, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +ORDER BY water_consumption; + sensor_id | timestamp | water_consumption +-----------+--------------------------+------------------- + 1 | Wed Nov 03 09:43:00 2010 | 5 + 1 | Wed Nov 03 09:46:00 2010 | 7 + 1 | Wed Nov 03 09:45:00 2010 | 8 + 1 | Wed Nov 03 09:44:00 2010 | 10 +(4 rows) + +-- Simplified test, where the view doesn't contain all group by clauses +CREATE MATERIALIZED VIEW water_consumption_no_select_bucket + WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized=false) +AS +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption 
+GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +WITH NO DATA; +CALL refresh_continuous_aggregate('water_consumption_no_select_bucket', NULL, NULL); +-- The results of the view and the query over hypertable should be the same +SELECT * FROM water_consumption_no_select_bucket ORDER BY water_consumption; + sensor_id | water_consumption +-----------+------------------- + 1 | 5 + 1 | 7 + 1 | 8 + 1 | 10 +(4 rows) + +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +ORDER BY water_consumption; + sensor_id | water_consumption +-----------+------------------- + 1 | 5 + 1 | 7 + 1 | 8 + 1 | 10 +(4 rows) + +-- The test with SELECT matching GROUP BY and placing aggregate expression not the last +CREATE MATERIALIZED VIEW water_consumption_aggregation_no_addition + WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE, timescaledb.finalized=false) +AS +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption, + time_bucket(INTERVAL '1 minute', timestamp) AS timestamp +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +WITH NO DATA; +CALL refresh_continuous_aggregate('water_consumption_aggregation_no_addition', NULL, NULL); +-- The results of the view and the query over hypertable should be the same +SELECT * FROM water_consumption_aggregation_no_addition ORDER BY water_consumption; + sensor_id | water_consumption | timestamp +-----------+-------------------+-------------------------- + 1 | 5 | Wed Nov 03 09:42:00 2010 + 1 | 7 | Wed Nov 03 09:45:00 2010 + 1 | 8 | Wed Nov 03 09:44:00 2010 + 1 | 10 | Wed Nov 03 09:43:00 2010 +(4 rows) + +SELECT sensor_id, + (max(water_index) - min(water_index)) AS water_consumption, + time_bucket(INTERVAL '1 minute', timestamp) AS timestamp +FROM water_consumption +GROUP BY sensor_id, time_bucket(INTERVAL '1 minute', timestamp) +ORDER BY water_consumption; + sensor_id | water_consumption | timestamp +-----------+-------------------+-------------------------- + 1 | 5 | Wed Nov 03 09:42:00 2010 + 1 | 7 | Wed Nov 03 09:45:00 2010 + 1 | 8 | Wed Nov 03 09:44:00 2010 + 1 | 10 | Wed Nov 03 09:43:00 2010 +(4 rows) + +DROP TABLE water_consumption CASCADE; +NOTICE: drop cascades to 6 other objects +NOTICE: drop cascades to table _timescaledb_internal._hyper_35_75_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_36_76_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_37_77_chunk +---- +--- github issue 2655 --- +create table raw_data(time timestamptz, search_query text, cnt integer, cnt2 integer); +select create_hypertable('raw_data','time', chunk_time_interval=>'15 days'::interval); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------ + (38,public,raw_data,t) +(1 row) + +insert into raw_data select '2000-01-01','Q1'; +--having has exprs that appear in select +CREATE MATERIALIZED VIEW search_query_count_1m WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) +AS + SELECT search_query,count(search_query) as count, + time_bucket(INTERVAL '1 minute', time) AS bucket + FROM raw_data + WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 + GROUP BY search_query, bucket HAVING count(search_query) > 3 OR sum(cnt) > 1; +NOTICE: refreshing continuous aggregate "search_query_count_1m" +--having has aggregates + grp by columns that appear in 
select +CREATE MATERIALIZED VIEW search_query_count_2 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), + time_bucket(INTERVAL '1 minute', time) AS bucket + FROM raw_data + WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 + GROUP BY search_query, bucket +HAVING count(search_query) > 3 OR sum(cnt) > 1 OR + ( sum(cnt) + count(cnt)) > 1 + AND search_query = 'Q1'; +NOTICE: refreshing continuous aggregate "search_query_count_2" +CREATE MATERIALIZED VIEW search_query_count_3 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) +AS + SELECT search_query,count(search_query) as count, sum(cnt), + time_bucket(INTERVAL '1 minute', time) AS bucket + FROM raw_data + WHERE search_query is not null AND LENGTH(TRIM(both from search_query))>0 + GROUP BY cnt +cnt2 , bucket, search_query + HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; +NOTICE: refreshing continuous aggregate "search_query_count_3" +insert into raw_data select '2000-01-01 00:00+0','Q1', 1, 100; +insert into raw_data select '2000-01-01 00:00+0','Q1', 2, 200; +insert into raw_data select '2000-01-01 00:00+0','Q1', 3, 300; +insert into raw_data select '2000-01-02 00:00+0','Q2', 10, 10; +insert into raw_data select '2000-01-02 00:00+0','Q2', 20, 20; +CALL refresh_continuous_aggregate('search_query_count_1m', NULL, NULL); +SELECT * FROM search_query_count_1m ORDER BY 1, 2; + search_query | count | bucket +--------------+-------+------------------------------ + Q1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 2 | Sat Jan 01 16:00:00 2000 PST +(2 rows) + +--only 1 of these should appear in the result +insert into raw_data select '2000-01-02 00:00+0','Q3', 0, 0; +insert into raw_data select '2000-01-03 00:00+0','Q4', 20, 20; +CALL refresh_continuous_aggregate('search_query_count_1m', NULL, NULL); +SELECT * FROM search_query_count_1m ORDER BY 1, 2; + search_query | count | bucket +--------------+-------+------------------------------ + Q1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 2 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | Sun Jan 02 16:00:00 2000 PST +(3 rows) + +--refresh search_query_count_2--- +CALL refresh_continuous_aggregate('search_query_count_2', NULL, NULL); +SELECT * FROM search_query_count_2 ORDER BY 1, 2; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q1 | 3 | 6 | Fri Dec 31 16:00:00 1999 PST + Q2 | 2 | 30 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | 20 | Sun Jan 02 16:00:00 2000 PST +(3 rows) + +--refresh search_query_count_3--- +CALL refresh_continuous_aggregate('search_query_count_3', NULL, NULL); +SELECT * FROM search_query_count_3 ORDER BY 1, 2, 3; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q1 | 1 | 1 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 2 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 1 | 10 | Sat Jan 01 16:00:00 2000 PST + Q2 | 1 | 20 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | 20 | Sun Jan 02 16:00:00 2000 PST +(6 rows) + +--- TEST enable compression on continuous aggregates +CREATE VIEW cagg_compression_status as +SELECT ca.mat_hypertable_id AS mat_htid, + ca.user_view_name AS cagg_name , + h.schema_name AS mat_schema_name, + h.table_name AS mat_table_name, + ca.materialized_only +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) +; 
+SELECT mat_htid AS "MAT_HTID" + , mat_schema_name || '.' || mat_table_name AS "MAT_HTNAME" + , mat_table_name AS "MAT_TABLE_NAME" +FROM cagg_compression_status +WHERE cagg_name = 'search_query_count_3' \gset +ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true'); +NOTICE: defaulting compress_segmentby to grp_5_5,search_query +NOTICE: defaulting compress_orderby to bucket +SELECT cagg_name, mat_table_name +FROM cagg_compression_status where cagg_name = 'search_query_count_3'; + cagg_name | mat_table_name +----------------------+----------------------------- + search_query_count_3 | _materialized_hypertable_41 +(1 row) + +\x +SELECT * FROM timescaledb_information.compression_settings +WHERE hypertable_name = :'MAT_TABLE_NAME'; +-[ RECORD 1 ]----------+---------------------------- +hypertable_schema | _timescaledb_internal +hypertable_name | _materialized_hypertable_41 +attname | grp_5_5 +segmentby_column_index | 1 +orderby_column_index | +orderby_asc | +orderby_nullsfirst | +-[ RECORD 2 ]----------+---------------------------- +hypertable_schema | _timescaledb_internal +hypertable_name | _materialized_hypertable_41 +attname | search_query +segmentby_column_index | 2 +orderby_column_index | +orderby_asc | +orderby_nullsfirst | +-[ RECORD 3 ]----------+---------------------------- +hypertable_schema | _timescaledb_internal +hypertable_name | _materialized_hypertable_41 +attname | bucket +segmentby_column_index | +orderby_column_index | 1 +orderby_asc | t +orderby_nullsfirst | f + +\x +SELECT compress_chunk(ch) +FROM show_chunks('search_query_count_3') ch; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +SELECT * from search_query_count_3 ORDER BY 1, 2, 3; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q1 | 1 | 1 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 2 | Fri Dec 31 16:00:00 1999 PST + Q1 | 1 | 3 | Fri Dec 31 16:00:00 1999 PST + Q2 | 1 | 10 | Sat Jan 01 16:00:00 2000 PST + Q2 | 1 | 20 | Sat Jan 01 16:00:00 2000 PST + Q4 | 1 | 20 | Sun Jan 02 16:00:00 2000 PST +(6 rows) + +-- insert into a new region of the hypertable and then refresh the cagg +-- (note we still do not support refreshes into existing regions. +-- cagg chunks do not map 1-1 to hypertabl regions. They encompass +-- more data +-- ). 
+insert into raw_data select '2000-05-01 00:00+0','Q3', 0, 0; +--this one fails now +\set ON_ERROR_STOP 0 +CALL refresh_continuous_aggregate('search_query_count_3', NULL, '2000-06-01 00:00+0'::timestamptz); +CALL refresh_continuous_aggregate('search_query_count_3', '2000-05-01 00:00+0'::timestamptz, '2000-06-01 00:00+0'::timestamptz); +NOTICE: continuous aggregate "search_query_count_3" is already up-to-date +\set ON_ERROR_STOP 1 +--insert row +insert into raw_data select '2001-05-10 00:00+0','Q3', 100, 100; +--this should succeed since it does not refresh any compressed regions in the cagg +CALL refresh_continuous_aggregate('search_query_count_3', '2001-05-01 00:00+0'::timestamptz, '2001-06-01 00:00+0'::timestamptz); +--verify watermark and check that chunks are compressed +SELECT _timescaledb_functions.to_timestamp(w) FROM _timescaledb_functions.cagg_watermark(:'MAT_HTID') w; + to_timestamp +------------------------------ + Wed May 09 17:01:00 2001 PDT +(1 row) + +SELECT chunk_name, range_start, range_end, is_compressed +FROM timescaledb_information.chunks +WHERE hypertable_name = :'MAT_TABLE_NAME' +ORDER BY 1; + chunk_name | range_start | range_end | is_compressed +--------------------+------------------------------+------------------------------+--------------- + _hyper_41_81_chunk | Fri Dec 24 16:00:00 1999 PST | Mon May 22 17:00:00 2000 PDT | t + _hyper_41_85_chunk | Sun Mar 18 16:00:00 2001 PST | Wed Aug 15 17:00:00 2001 PDT | f +(2 rows) + +SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log +WHERE materialization_id = :'MAT_HTID' ORDER BY 1, 2,3; + materialization_id | lowest_modified_value | greatest_modified_value +--------------------+-----------------------+------------------------- + 41 | -9223372036854775808 | -210866803200000001 + 41 | 959817600000000 | 988675199999999 + 41 | 991353600000000 | 9223372036854775807 +(3 rows) + +SELECT * from search_query_count_3 +WHERE bucket > '2001-01-01' +ORDER BY 1, 2, 3; + search_query | count | sum | bucket +--------------+-------+-----+------------------------------ + Q3 | 1 | 100 | Wed May 09 17:00:00 2001 PDT +(1 row) + +--now disable compression , will error out -- +\set ON_ERROR_STOP 0 +ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'false'); +ERROR: cannot change configuration on already compressed chunks +\set ON_ERROR_STOP 1 +SELECT decompress_chunk(schema_name || '.' 
|| table_name) +FROM _timescaledb_catalog.chunk +WHERE hypertable_id = :'MAT_HTID' and status = 1; + decompress_chunk +------------------ +(0 rows) + +SELECT cagg_name, mat_table_name +FROM cagg_compression_status where cagg_name = 'search_query_count_3'; + cagg_name | mat_table_name +----------------------+----------------------------- + search_query_count_3 | _materialized_hypertable_41 +(1 row) + +SELECT view_name, materialized_only, compression_enabled +FROM timescaledb_information.continuous_aggregates +where view_name = 'search_query_count_3'; + view_name | materialized_only | compression_enabled +----------------------+-------------------+--------------------- + search_query_count_3 | f | t +(1 row) + +-- TEST caggs on table with more columns than in the cagg view defn -- +CREATE TABLE test_morecols ( time TIMESTAMPTZ NOT NULL, + val1 INTEGER, val2 INTEGER, val3 INTEGER, val4 INTEGER, + val5 INTEGER, val6 INTEGER, val7 INTEGER, val8 INTEGER); +SELECT create_hypertable('test_morecols', 'time', chunk_time_interval=> '7 days'::interval); + create_hypertable +----------------------------- + (43,public,test_morecols,t) +(1 row) + +INSERT INTO test_morecols +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 55, 75, 40, 70, NULL, 100, 200, 200; +CREATE MATERIALIZED VIEW test_morecols_cagg with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) +AS SELECT time_bucket('30 days',time), avg(val1), count(val2) + FROM test_morecols GROUP BY 1; +NOTICE: refreshing continuous aggregate "test_morecols_cagg" +ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true'); +NOTICE: defaulting compress_orderby to time_bucket +SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_44_91_chunk +(1 row) + +SELECT * FROM test_morecols_cagg; + time_bucket | avg | count +------------------------------+---------------------+------- + Fri Nov 23 16:00:00 2018 PST | 55.0000000000000000 | 23 + Sun Dec 23 16:00:00 2018 PST | 55.0000000000000000 | 8 +(2 rows) + +SELECT view_name, materialized_only, compression_enabled +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_morecols_cagg'; + view_name | materialized_only | compression_enabled +--------------------+-------------------+--------------------- + test_morecols_cagg | f | t +(1 row) + +--should keep compressed option, modify only materialized -- +ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.materialized_only='true'); +SELECT view_name, materialized_only, compression_enabled +FROM timescaledb_information.continuous_aggregates +where view_name = 'test_morecols_cagg'; + view_name | materialized_only | compression_enabled +--------------------+-------------------+--------------------- + test_morecols_cagg | t | t +(1 row) + +CREATE TABLE issue3248(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON issue3248(time DESC); +CREATE INDEX ON issue3248(device_id,time DESC); +SELECT create_hypertable('issue3248','time',create_default_indexes:=false); + create_hypertable +------------------------- + (46,public,issue3248,t) +(1 row) + +ALTER TABLE issue3248 DROP COLUMN filler_1; +INSERT INTO issue3248(time,device_id,v0,v1,v2,v3) +SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL +FROM generate_series('2000-01-01 
0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','8h') gtime(time), + generate_series(1,5,1) gdevice(device_id); +ALTER TABLE issue3248 DROP COLUMN filler_2; +INSERT INTO issue3248(time,device_id,v0,v1,v2,v3) +SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL +FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','8h') gtime(time), + generate_series(1,5,1) gdevice(device_id); +ALTER TABLE issue3248 DROP COLUMN filler_3; +INSERT INTO issue3248(time,device_id,v0,v1,v2,v3) +SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL +FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','8h') gtime(time), + generate_series(1,5,1) gdevice(device_id); +ANALYZE issue3248; +CREATE materialized view issue3248_cagg WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) +AS SELECT time_bucket('1h',time), device_id, min(v0), max(v1), avg(v2) +FROM issue3248 GROUP BY 1,2; +NOTICE: refreshing continuous aggregate "issue3248_cagg" +SELECT + FROM issue3248 AS m, + LATERAL(SELECT m FROM issue3248_cagg WHERE avg IS NULL LIMIT 1) AS lat; +-- +(0 rows) + +-- test that option create_group_indexes is taken into account +CREATE TABLE test_group_idx ( +time timestamptz, +symbol int, +value numeric +); +select create_hypertable('test_group_idx', 'time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------ + (48,public,test_group_idx,t) +(1 row) + +insert into test_group_idx +select t, round(random()*10), random()*5 +from generate_series('2020-01-01', '2020-02-25', INTERVAL '12 hours') t; +create materialized view cagg_index_true +with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.create_group_indexes=true, timescaledb.finalized=false) as +select + time_bucket('1 day', "time") as bucket, + sum(value), + symbol +from test_group_idx +group by bucket, symbol; +NOTICE: refreshing continuous aggregate "cagg_index_true" +create materialized view cagg_index_false +with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.create_group_indexes=false, timescaledb.finalized=false) as +select + time_bucket('1 day', "time") as bucket, + sum(value), + symbol +from test_group_idx +group by bucket, symbol; +NOTICE: refreshing continuous aggregate "cagg_index_false" +create materialized view cagg_index_default +with (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) as +select + time_bucket('1 day', "time") as bucket, + sum(value), + symbol +from test_group_idx +group by bucket, symbol; +NOTICE: refreshing continuous aggregate "cagg_index_default" +-- see corresponding materialization_hypertables +select view_name, materialization_hypertable_name from timescaledb_information.continuous_aggregates ca +where view_name like 'cagg_index_%'; + view_name | materialization_hypertable_name +--------------------+--------------------------------- + cagg_index_default | _materialized_hypertable_51 + cagg_index_false | _materialized_hypertable_50 + cagg_index_true | _materialized_hypertable_49 +(3 rows) + +-- now make sure a group index has been created when explicitly asked for +\x on +select i.* +from pg_indexes i +join pg_class c + on schemaname = relnamespace::regnamespace::text + and tablename = relname +where tablename in (select materialization_hypertable_name from timescaledb_information.continuous_aggregates +where view_name like 'cagg_index_%') +order by tablename; 
+-[ RECORD 1 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_49 +indexname | _materialized_hypertable_49_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_49_bucket_idx ON _timescaledb_internal._materialized_hypertable_49 USING btree (bucket DESC) +-[ RECORD 2 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_49 +indexname | _materialized_hypertable_49_symbol_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_49_symbol_bucket_idx ON _timescaledb_internal._materialized_hypertable_49 USING btree (symbol, bucket DESC) +-[ RECORD 3 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_50 +indexname | _materialized_hypertable_50_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_50_bucket_idx ON _timescaledb_internal._materialized_hypertable_50 USING btree (bucket DESC) +-[ RECORD 4 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_51 +indexname | _materialized_hypertable_51_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_51_bucket_idx ON _timescaledb_internal._materialized_hypertable_51 USING btree (bucket DESC) +-[ RECORD 5 ]------------------------------------------------------------------------------------------------------------------------------------------------- +schemaname | _timescaledb_internal +tablename | _materialized_hypertable_51 +indexname | _materialized_hypertable_51_symbol_bucket_idx +tablespace | +indexdef | CREATE INDEX _materialized_hypertable_51_symbol_bucket_idx ON _timescaledb_internal._materialized_hypertable_51 USING btree (symbol, bucket DESC) + +\x off +-- Test View Target Entries that contain both aggrefs and Vars in the same expression +CREATE TABLE transactions +( + "time" timestamp with time zone NOT NULL, + dummy1 integer, + dummy2 integer, + dummy3 integer, + dummy4 integer, + dummy5 integer, + amount integer, + fiat_value integer +); +SELECT create_hypertable('transactions', 'time'); + create_hypertable +---------------------------- + (52,public,transactions,t) +(1 row) + +INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 
0, 1, 10); +INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); +CREATE materialized view cashflows( + bucket, + amount, + cashflow, + cashflow2 +) WITH ( + timescaledb.continuous, + timescaledb.materialized_only = true, + timescaledb.finalized = false +) AS +SELECT time_bucket ('1 day', time) AS bucket, + amount, + CASE + WHEN amount < 0 THEN (0 - sum(fiat_value)) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) +FROM transactions +GROUP BY bucket, amount; +NOTICE: refreshing continuous aggregate "cashflows" +SELECT h.table_name AS "MAT_TABLE_NAME", + partial_view_name AS "PART_VIEW_NAME", + direct_view_name AS "DIRECT_VIEW_NAME" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'cashflows' +\gset +-- Show both the columns and the view definitions to see that +-- references are correct in the view as well. +\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" + View "_timescaledb_internal._direct_view_53" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, + amount, + CASE + WHEN amount < 0 THEN 0 - sum(fiat_value) + ELSE sum(fiat_value) + END AS cashflow, + amount + sum(fiat_value) AS cashflow2 + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount; + +\d+ "_timescaledb_internal".:"PART_VIEW_NAME" + View "_timescaledb_internal._partial_view_53" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+--------------------------+-----------+----------+---------+----------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + agg_3_3 | bytea | | | | extended | + agg_3_4 | bytea | | | | extended | + var_3_5 | integer | | | | plain | + agg_4_6 | bytea | | | | extended | + chunk_id | integer | | | | plain | +View definition: + SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, + amount, + _timescaledb_functions.partialize_agg(sum(fiat_value)) AS agg_3_3, + _timescaledb_functions.partialize_agg(sum(fiat_value)) AS agg_3_4, + amount AS var_3_5, + _timescaledb_functions.partialize_agg(sum(fiat_value)) AS agg_4_6, + _timescaledb_functions.chunk_id_from_relid(tableoid) AS chunk_id + FROM transactions + GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount, (_timescaledb_functions.chunk_id_from_relid(tableoid)); + +\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" + Table "_timescaledb_internal._materialized_hypertable_53" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +----------+--------------------------+-----------+----------+---------+----------+--------------+------------- + bucket | timestamp with time zone | | not null | | plain | | + amount | integer | | | | plain | | + agg_3_3 | bytea | | | | extended | | + agg_3_4 | bytea | | | | extended | | + var_3_5 | integer | | | | plain | | + agg_4_6 | bytea | | | | extended | | + chunk_id | integer | | | | plain | | +Indexes: + "_materialized_hypertable_53_amount_bucket_idx" btree (amount, bucket DESC) + "_materialized_hypertable_53_bucket_idx" btree (bucket DESC) +Triggers: + ts_insert_blocker BEFORE 
INSERT ON _timescaledb_internal._materialized_hypertable_53 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() +Child tables: _timescaledb_internal._hyper_53_114_chunk, + _timescaledb_internal._hyper_53_115_chunk + +\d+ 'cashflows' + View "public.cashflows" + Column | Type | Collation | Nullable | Default | Storage | Description +-----------+--------------------------+-----------+----------+---------+---------+------------- + bucket | timestamp with time zone | | | | plain | + amount | integer | | | | plain | + cashflow | bigint | | | | plain | + cashflow2 | bigint | | | | plain | +View definition: + SELECT bucket, + amount, + CASE + WHEN var_3_5 < 0 THEN 0 - _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], agg_3_3, NULL::bigint) + ELSE _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], agg_3_4, NULL::bigint) + END AS cashflow, + var_3_5 + _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], agg_4_6, NULL::bigint) AS cashflow2 + FROM _timescaledb_internal._materialized_hypertable_53 + GROUP BY bucket, amount; + +SELECT * FROM cashflows ORDER BY bucket, amount, cashflow, cashflow2; + bucket | amount | cashflow | cashflow2 +------------------------------+--------+----------+----------- + Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 + Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 + Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 + Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 + Thu Nov 01 17:00:00 2018 PDT | -1 | -10 | 9 + Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 +(6 rows) + +-- Indexes on not finalized caggs are not allowed +\set ON_ERROR_STOP 0 +CREATE INDEX index_on_not_finalized_cagg ON cashflows(cashflow); +ERROR: operation not supported on continuous aggreates that are not finalized +\set ON_ERROR_STOP 1 diff --git a/tsl/test/expected/deparse.out b/tsl/test/expected/deparse-13.out similarity index 100% rename from tsl/test/expected/deparse.out rename to tsl/test/expected/deparse-13.out diff --git a/tsl/test/expected/deparse-14.out b/tsl/test/expected/deparse-14.out new file mode 100644 index 00000000000..4fd5f9ac083 --- /dev/null +++ b/tsl/test/expected/deparse-14.out @@ -0,0 +1,66 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- We compare information(\d+) about manually created tables with the ones that were recreated using deparse_table command. +-- There should be no diff. +\set ECHO errors + ?column? 
+------------------------- + TABLE DEPARSE TEST DONE +(1 row) + +-- test drop_chunks function deparsing +SELECT * FROM tsl_test_deparse_drop_chunks('myschema.table10', '2019-01-01'::timestamptz, verbose => true); + tsl_test_deparse_drop_chunks +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'myschema.table10',older_than => 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone,newer_than => NULL,verbose => 't') +(1 row) + +SELECT * FROM tsl_test_deparse_drop_chunks('table1', newer_than => 12345); + tsl_test_deparse_drop_chunks +-------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'public.table1',older_than => NULL,newer_than => '12345'::integer,verbose => 'f') +(1 row) + +SELECT * FROM tsl_test_deparse_drop_chunks('table1', older_than => interval '2 years', newer_than => '2015-01-01'::timestamp); + tsl_test_deparse_drop_chunks +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'public.table1',older_than => '@ 2 years'::interval,newer_than => 'Thu Jan 01 00:00:00 2015'::timestamp without time zone,verbose => 'f') +(1 row) + +-- test generalized deparsing function +SELECT * FROM tsl_test_deparse_scalar_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT * FROM public.tsl_test_deparse_scalar_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => 't',option => 'f') + tsl_test_deparse_scalar_func +------------------------------ + t +(1 row) + +SELECT * FROM tsl_test_deparse_named_scalar_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT option FROM public.tsl_test_deparse_named_scalar_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => 't',option => 'f') + option +-------- + t +(1 row) + +SELECT * FROM tsl_test_deparse_composite_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT success,message FROM public.tsl_test_deparse_composite_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => NULL,option => 'f') + success | message +---------+--------- + | +(1 row) + +-- test errors handling +\set ON_ERROR_STOP 0 +CREATE TEMP TABLE fail_table1(x INT); +SELECT _timescaledb_internal.get_tabledef('fail_table1'); +ERROR: temporary table is not supported +CREATE INDEX my_fail_table1_idx ON fail_table1 USING BTREE(x); +SELECT _timescaledb_internal.get_tabledef('my_fail_table1_idx'); +ERROR: "my_fail_table1_idx" is an index +SELECT _timescaledb_internal.get_tabledef('non_existing'); +ERROR: relation "non_existing" does not exist at character 43 +CREATE TABLE row_sec(i INT); +ALTER TABLE row_sec ENABLE ROW LEVEL SECURITY; +SELECT 
_timescaledb_internal.get_tabledef('row_sec'); +ERROR: row security is not supported diff --git a/tsl/test/expected/deparse-15.out b/tsl/test/expected/deparse-15.out new file mode 100644 index 00000000000..4fd5f9ac083 --- /dev/null +++ b/tsl/test/expected/deparse-15.out @@ -0,0 +1,66 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- We compare information(\d+) about manually created tables with the ones that were recreated using deparse_table command. +-- There should be no diff. +\set ECHO errors + ?column? +------------------------- + TABLE DEPARSE TEST DONE +(1 row) + +-- test drop_chunks function deparsing +SELECT * FROM tsl_test_deparse_drop_chunks('myschema.table10', '2019-01-01'::timestamptz, verbose => true); + tsl_test_deparse_drop_chunks +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'myschema.table10',older_than => 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone,newer_than => NULL,verbose => 't') +(1 row) + +SELECT * FROM tsl_test_deparse_drop_chunks('table1', newer_than => 12345); + tsl_test_deparse_drop_chunks +-------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'public.table1',older_than => NULL,newer_than => '12345'::integer,verbose => 'f') +(1 row) + +SELECT * FROM tsl_test_deparse_drop_chunks('table1', older_than => interval '2 years', newer_than => '2015-01-01'::timestamp); + tsl_test_deparse_drop_chunks +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'public.table1',older_than => '@ 2 years'::interval,newer_than => 'Thu Jan 01 00:00:00 2015'::timestamp without time zone,verbose => 'f') +(1 row) + +-- test generalized deparsing function +SELECT * FROM tsl_test_deparse_scalar_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT * FROM public.tsl_test_deparse_scalar_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => 't',option => 'f') + tsl_test_deparse_scalar_func +------------------------------ + t +(1 row) + +SELECT * FROM tsl_test_deparse_named_scalar_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT option FROM public.tsl_test_deparse_named_scalar_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => 't',option => 'f') + option +-------- + t +(1 row) + +SELECT * FROM tsl_test_deparse_composite_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT success,message FROM public.tsl_test_deparse_composite_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => NULL,option 
=> 'f') + success | message +---------+--------- + | +(1 row) + +-- test errors handling +\set ON_ERROR_STOP 0 +CREATE TEMP TABLE fail_table1(x INT); +SELECT _timescaledb_internal.get_tabledef('fail_table1'); +ERROR: temporary table is not supported +CREATE INDEX my_fail_table1_idx ON fail_table1 USING BTREE(x); +SELECT _timescaledb_internal.get_tabledef('my_fail_table1_idx'); +ERROR: "my_fail_table1_idx" is an index +SELECT _timescaledb_internal.get_tabledef('non_existing'); +ERROR: relation "non_existing" does not exist at character 43 +CREATE TABLE row_sec(i INT); +ALTER TABLE row_sec ENABLE ROW LEVEL SECURITY; +SELECT _timescaledb_internal.get_tabledef('row_sec'); +ERROR: row security is not supported diff --git a/tsl/test/expected/deparse-16.out b/tsl/test/expected/deparse-16.out new file mode 100644 index 00000000000..427ba01974d --- /dev/null +++ b/tsl/test/expected/deparse-16.out @@ -0,0 +1,66 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- We compare information(\d+) about manually created tables with the ones that were recreated using deparse_table command. +-- There should be no diff. +\set ECHO errors + ?column? +------------------------- + TABLE DEPARSE TEST DONE +(1 row) + +-- test drop_chunks function deparsing +SELECT * FROM tsl_test_deparse_drop_chunks('myschema.table10', '2019-01-01'::timestamptz, verbose => true); + tsl_test_deparse_drop_chunks +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'myschema.table10',older_than => 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone,newer_than => NULL,verbose => 't') +(1 row) + +SELECT * FROM tsl_test_deparse_drop_chunks('table1', newer_than => 12345); + tsl_test_deparse_drop_chunks +-------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'public.table1',older_than => NULL,newer_than => '12345'::integer,verbose => 'f') +(1 row) + +SELECT * FROM tsl_test_deparse_drop_chunks('table1', older_than => interval '2 years', newer_than => '2015-01-01'::timestamp); + tsl_test_deparse_drop_chunks +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT * FROM public.drop_chunks(relation => 'public.table1',older_than => '@ 2 years'::interval,newer_than => 'Thu Jan 01 00:00:00 2015'::timestamp without time zone,verbose => 'f') +(1 row) + +-- test generalized deparsing function +SELECT * FROM tsl_test_deparse_scalar_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT * FROM public.tsl_test_deparse_scalar_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => 't',option => 'f') + tsl_test_deparse_scalar_func +------------------------------ + t +(1 row) + +SELECT * FROM tsl_test_deparse_named_scalar_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT option FROM 
public.tsl_test_deparse_named_scalar_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => 't',option => 'f') + option +-------- + t +(1 row) + +SELECT * FROM tsl_test_deparse_composite_func(schema_name => 'Foo', table_name => 'bar', option => false, "time" => timestamp '2019-09-10 11:08', message => 'This is a test message.'); +NOTICE: Deparsed: SELECT success,message FROM public.tsl_test_deparse_composite_func(schema_name => 'Foo',table_name => 'bar',time => 'Tue Sep 10 11:08:00 2019 PDT',message => 'This is a test message.',not_set => NULL,option => 'f') + success | message +---------+--------- + | +(1 row) + +-- test errors handling +\set ON_ERROR_STOP 0 +CREATE TEMP TABLE fail_table1(x INT); +SELECT _timescaledb_internal.get_tabledef('fail_table1'); +ERROR: temporary table is not supported +CREATE INDEX my_fail_table1_idx ON fail_table1 USING BTREE(x); +SELECT _timescaledb_internal.get_tabledef('my_fail_table1_idx'); +ERROR: cannot open relation "my_fail_table1_idx" +SELECT _timescaledb_internal.get_tabledef('non_existing'); +ERROR: relation "non_existing" does not exist at character 43 +CREATE TABLE row_sec(i INT); +ALTER TABLE row_sec ENABLE ROW LEVEL SECURITY; +SELECT _timescaledb_internal.get_tabledef('row_sec'); +ERROR: row security is not supported diff --git a/tsl/test/expected/dist_grant-16.out b/tsl/test/expected/dist_grant-16.out new file mode 100644 index 00000000000..810a9be4253 --- /dev/null +++ b/tsl/test/expected/dist_grant-16.out @@ -0,0 +1,1289 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Need to be super user to create extension and add data nodes +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +\unset ECHO +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\set DATA_NODE_4 :TEST_DBNAME _4 +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------+-----------------+--------------+------------------+------------------- + db_dist_grant_1 | db_dist_grant_1 | t | t | t + db_dist_grant_2 | db_dist_grant_2 | t | t | t + db_dist_grant_3 | db_dist_grant_3 | t | t | t +(3 rows) + +CREATE TABLE conditions(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FLOAT, humidity FLOAT); +GRANT SELECT ON conditions TO :ROLE_1; +GRANT INSERT, DELETE ON conditions TO :ROLE_2; +SELECT relname, relacl FROM pg_class WHERE relname = 'conditions'; + relname | relacl +------------+-------------------------------------------------------------------------------------------------------------------- + conditions | {cluster_super_user=arwdDxt/cluster_super_user,test_role_1=r/cluster_super_user,test_role_2=ad/cluster_super_user} +(1 row) + +SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | conditions | t +(1 row) + +SELECT has_table_privilege(:'ROLE_1', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege(:'ROLE_1', 
'conditions', 'DELETE') AS "DELETE" + , has_table_privilege(:'ROLE_1', 'conditions', 'INSERT') AS "INSERT"; + SELECT | DELETE | INSERT +--------+--------+-------- + t | f | f +(1 row) + +SELECT * FROM test.remote_exec(NULL, format($$ + SELECT has_table_privilege('%s', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; +$$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +NOTICE: [db_dist_grant_1]: + SELECT has_table_privilege('test_role_1', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'conditions', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_1]: +SELECT|DELETE|INSERT +------+------+------ +t |f |f +(1 row) + + +NOTICE: [db_dist_grant_2]: + SELECT has_table_privilege('test_role_1', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'conditions', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_2]: +SELECT|DELETE|INSERT +------+------+------ +t |f |f +(1 row) + + +NOTICE: [db_dist_grant_3]: + SELECT has_table_privilege('test_role_1', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'conditions', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_3]: +SELECT|DELETE|INSERT +------+------+------ +t |f |f +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT has_table_privilege(:'ROLE_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege(:'ROLE_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege(:'ROLE_2', 'conditions', 'INSERT') AS "INSERT"; + SELECT | DELETE | INSERT +--------+--------+-------- + f | t | t +(1 row) + +SELECT * FROM test.remote_exec(NULL, format($$ + SELECT has_table_privilege('%s', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; +$$, :'ROLE_2', :'ROLE_2', :'ROLE_2')); +NOTICE: [db_dist_grant_1]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_1]: +SELECT|DELETE|INSERT +------+------+------ +f |t |t +(1 row) + + +NOTICE: [db_dist_grant_2]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_2]: +SELECT|DELETE|INSERT +------+------+------ +f |t |t +(1 row) + + +NOTICE: [db_dist_grant_3]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_3]: +SELECT|DELETE|INSERT +------+------+------ +f |t |t +(1 row) + + + remote_exec +------------- + +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 +FROM generate_series('2019-01-01 00:00:00'::timestamptz, '2019-02-01 00:00:00', '1 min') AS time; +-- Check that we can actually execute a select as non-owner +SET ROLE :ROLE_1; +SELECT COUNT(*) FROM conditions; + count 
+------- + 44641 +(1 row) + +SET ROLE :ROLE_CLUSTER_SUPERUSER; +GRANT UPDATE ON conditions TO :ROLE_2; +BEGIN; +GRANT TRUNCATE ON conditions TO :ROLE_2; +ROLLBACK; +-- Should have UPDATE, but not TRUNCATE +SELECT has_table_privilege(:'ROLE_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege(:'ROLE_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege(:'ROLE_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege(:'ROLE_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege(:'ROLE_2', 'conditions', 'TRUNCATE') AS "TRUNCATE"; + SELECT | DELETE | INSERT | UPDATE | TRUNCATE +--------+--------+--------+--------+---------- + f | t | t | t | f +(1 row) + +SELECT * FROM test.remote_exec(NULL, format($$ + SELECT has_table_privilege('%s', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; +$$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +NOTICE: [db_dist_grant_1]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_1]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [db_dist_grant_2]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_2]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [db_dist_grant_3]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_3]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Add another data node and check that grants are propagated when the +-- data node is attached to an existing table. 
+SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data4', host => 'localhost', database => :'DATA_NODE_4'); + node_name | database | node_created | database_created | extension_created +-----------+-----------------+--------------+------------------+------------------- + data4 | db_dist_grant_4 | t | t | t +(1 row) + +\set ON_ERROR_STOP 0 +SELECT * FROM test.remote_exec(NULL, format($$ + SELECT has_table_privilege('%s', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; +$$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +NOTICE: [db_dist_grant_1]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_1]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [db_dist_grant_2]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_2]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [db_dist_grant_3]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_3]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [data4]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +ERROR: [data4]: relation "conditions" does not exist +\set ON_ERROR_STOP 1 +SELECT * FROM attach_data_node('data4', 'conditions'); +NOTICE: the number of partitions in dimension "device" was increased to 4 + hypertable_id | node_hypertable_id | node_name +---------------+--------------------+----------- + 1 | 1 | data4 +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 +FROM generate_series('2019-02-01 00:00:00'::timestamptz, '2019-03-01 00:00:00', '1 min') AS time; +SELECT * FROM test.remote_exec(NULL, format($$ + SELECT has_table_privilege('%s', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('%s', 'conditions', 'DELETE') AS 
"DELETE" + , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; +$$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +NOTICE: [db_dist_grant_1]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_1]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [db_dist_grant_2]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_2]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [db_dist_grant_3]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [db_dist_grant_3]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + +NOTICE: [data4]: + SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_2', 'conditions', 'INSERT') AS "INSERT" + , has_table_privilege('test_role_2', 'conditions', 'UPDATE') AS "UPDATE" + , has_table_privilege('test_role_2', 'conditions', 'TRUNCATE') AS "TRUNCATE" +NOTICE: [data4]: +SELECT|DELETE|INSERT|UPDATE|TRUNCATE +------+------+------+------+-------- +f |t |t |t |f +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Check that grants are not propagated when enable_grant_propagation +-- is false. +SET timescaledb.enable_grant_propagation = false; +CREATE TABLE no_grants(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FLOAT); +GRANT SELECT ON no_grants TO :ROLE_1; +-- First case is when table is created. Grants should not be propagated. 
+SELECT * FROM create_distributed_hypertable('no_grants', 'time', 'device'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 2 | public | no_grants | t +(1 row) + +SELECT has_table_privilege(:'ROLE_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege(:'ROLE_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege(:'ROLE_1', 'no_grants', 'INSERT') AS "INSERT"; + SELECT | DELETE | INSERT +--------+--------+-------- + t | f | f +(1 row) + +SELECT * FROM test.remote_exec(NULL, format($$ + SELECT has_table_privilege('%s', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; +$$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +NOTICE: [db_dist_grant_1]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_1]: +SELECT|DELETE|INSERT +------+------+------ +t |f |f +(1 row) + + +NOTICE: [db_dist_grant_2]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_2]: +SELECT|DELETE|INSERT +------+------+------ +t |f |f +(1 row) + + +NOTICE: [db_dist_grant_3]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_3]: +SELECT|DELETE|INSERT +------+------+------ +t |f |f +(1 row) + + +NOTICE: [data4]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [data4]: +SELECT|DELETE|INSERT +------+------+------ +t |f |f +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Second case is when grants is done on an existing table. The grant +-- should not be propagated. 
+GRANT INSERT ON no_grants TO :ROLE_1; +SELECT has_table_privilege(:'ROLE_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege(:'ROLE_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege(:'ROLE_1', 'no_grants', 'INSERT') AS "INSERT"; + SELECT | DELETE | INSERT +--------+--------+-------- + t | f | t +(1 row) + +SELECT * FROM test.remote_exec(NULL, format($$ + SELECT has_table_privilege('%s', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; +$$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +NOTICE: [db_dist_grant_1]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_1]: +SELECT|DELETE|INSERT +------+------+------ +t |f |t +(1 row) + + +NOTICE: [db_dist_grant_2]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_2]: +SELECT|DELETE|INSERT +------+------+------ +t |f |t +(1 row) + + +NOTICE: [db_dist_grant_3]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [db_dist_grant_3]: +SELECT|DELETE|INSERT +------+------+------ +t |f |t +(1 row) + + +NOTICE: [data4]: + SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" + , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" + , has_table_privilege('test_role_1', 'no_grants', 'INSERT') AS "INSERT" +NOTICE: [data4]: +SELECT|DELETE|INSERT +------+------+------ +t |f |t +(1 row) + + + remote_exec +------------- + +(1 row) + +DROP TABLE conditions; +DROP TABLE no_grants; +-- Check that grants and revokes are copied properly to the chunks and +-- that newly created chunks have the right privileges. 
+CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a hypertable and show that it does not have any privileges +SELECT * FROM create_hypertable('conditions', 'time', chunk_time_interval => '5 days'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 3 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------+-------------------+---------- + public | conditions | table | | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | | | + _timescaledb_internal | _hyper_3_36_chunk | table | | | + _timescaledb_internal | _hyper_3_37_chunk | table | | | +(3 rows) + +-- Add privileges and show that they propagate to the chunks +GRANT SELECT, INSERT ON conditions TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-----------------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | + _timescaledb_internal | _hyper_3_36_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | + _timescaledb_internal | _hyper_3_37_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | +(3 rows) + +-- Create some more chunks and show that they also get the privileges. 
+INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-10 00:00'::timestamp, '2018-12-20 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-----------------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | + _timescaledb_internal | _hyper_3_36_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | + _timescaledb_internal | _hyper_3_37_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | + _timescaledb_internal | _hyper_3_38_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | + _timescaledb_internal | _hyper_3_39_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =ar/cluster_super_user | | +(5 rows) + +-- Revoke one of the privileges and show that it propagate to the +-- chunks. +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-----------------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_36_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_37_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_38_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_39_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | +(5 rows) + +-- Add some more chunks and show that it inherits the grants from the +-- hypertable. 
+INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-20 00:00'::timestamp, '2018-12-30 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-----------------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_36_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_37_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_38_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_39_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_40_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | + _timescaledb_internal | _hyper_3_41_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | +(7 rows) + +-- Change grants of one chunk explicitly and check that it is possible +\z _timescaledb_internal._hyper_3_35_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-----------------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =r/cluster_super_user | | +(1 row) + +GRANT UPDATE ON _timescaledb_internal._hyper_3_35_chunk TO PUBLIC; +\z _timescaledb_internal._hyper_3_35_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-----------------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =rw/cluster_super_user | | +(1 row) + +REVOKE SELECT ON _timescaledb_internal._hyper_3_35_chunk FROM PUBLIC; +\z _timescaledb_internal._hyper_3_35_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+-------------------+-------+-----------------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_35_chunk | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | =w/cluster_super_user | | +(1 row) + +DROP TABLE conditions; +-- Test that we can create a writer role, assign users to that role, +-- and allow the users to insert data and create new chunks. 
+\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +CREATE TABLE conditions( + time timestamptz, + device int CHECK (device > 0), + temp float, + PRIMARY KEY (time,device) +); +SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device', 3); +WARNING: insufficient number of partitions for dimension "device" + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 4 | public | conditions | t +(1 row) + +-- Test that we can create a writer role, assign users to that role, +-- and allow the users to insert data and create new chunks. +SET ROLE :ROLE_DEFAULT_PERM_USER_2; +\set ON_ERROR_STOP 0 +INSERT INTO conditions +SELECT time, 1 + (random()*30)::int, random()*80 +FROM generate_series('2019-01-01 00:00:00'::timestamptz, '2019-02-01 00:00:00', '1 min') AS time; +ERROR: permission denied for table conditions +\set ON_ERROR_STOP 1 +RESET ROLE; +GRANT INSERT ON conditions TO :ROLE_DEFAULT_PERM_USER_2; +SET ROLE :ROLE_DEFAULT_PERM_USER_2; +INSERT INTO conditions +SELECT time, 1 + (random()*30)::int, random()*80 +FROM generate_series('2019-01-01 00:00:00'::timestamptz, '2019-02-01 00:00:00', '1 min') AS time; +RESET ROLE; +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +-- Check that GRANT ALL IN SCHEMA adds privileges to the parent +-- and also does so on the foreign chunks in another schema +CREATE VIEW CHUNK_QRY1 AS SELECT n.nspname as schema, substring(c.relname for 12) as name, pg_catalog.array_to_string(c.relacl, E'\n') AS Access_privileges FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','v','m','S','f','p') AND c.relname OPERATOR(pg_catalog.~) '^(_dist.*)$' COLLATE pg_catalog.default ORDER BY 1, 2; +CALL distributed_exec($$ CREATE VIEW CHUNK_QRY1 AS SELECT n.nspname as schema, substring(c.relname for 12) as name, pg_catalog.array_to_string(c.relacl, E'\n') AS Access_privileges FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','v','m','S','f','p') AND c.relname OPERATOR(pg_catalog.~) '^(_dist.*)$' COLLATE pg_catalog.default ORDER BY 1, 2; $$); +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user +| | + | | | default_perm_user=arwdDxt/cluster_super_user | | +(1 row) + +SELECT * FROM CHUNK_QRY1; + schema | name | access_privileges +-----------------------+--------------+----------------------------------------------- + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | 
| default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user +(18 rows) + +-- Check on one datanode, should be the same on others as well +\c :DATA_NODE_1 :ROLE_CLUSTER_SUPERUSER; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user +| | + | | | default_perm_user=arwdDxt/cluster_super_user | | +(1 row) + +SELECT * FROM CHUNK_QRY1; + schema | name | access_privileges +-----------------------+--------------+----------------------------------------------- + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | 
default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + + | | default_perm_user=arwdDxt/cluster_super_user +(6 rows) + +-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent +-- and also does so on foreign chunks in another schema +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user | | +(1 row) + +SELECT * FROM CHUNK_QRY1; + schema | name | access_privileges +-----------------------+--------------+----------------------------------------------- + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | 
cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user +(18 rows) + +-- Check on one datanode, should be the same on others as well +\c :DATA_NODE_2 :ROLE_CLUSTER_SUPERUSER; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user | | +(1 row) + +SELECT * FROM CHUNK_QRY1; + schema | name | access_privileges +-----------------------+--------------+----------------------------------------------- + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user + _timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user_2=a/cluster_super_user +(6 rows) + +-- Create chunks in the same schema as the hypertable and check that +-- they also get the same privileges as the hypertable +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +CREATE TABLE measurements( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a distributed hypertable with chunks in the same schema +SELECT * FROM create_distributed_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public'); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 5 | public | measurements | t +(1 row) + +INSERT INTO measurements +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +-- Create a local regular table +CREATE TABLE local(g int primary key, h int); +-- Create a local hypertable +CREATE TABLE conditions_lht(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FLOAT, humidity FLOAT); +SELECT * FROM create_hypertable('conditions_lht', 'time', chunk_time_interval => '5 days'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------+--------- + 6 | public | conditions_lht | t +(1 row) + +INSERT INTO conditions_lht +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') 
AS time; +-- GRANT ALL and check privileges of these mix of local table, local hypertable and distributed hypertable +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+-----------------------------------------------+-------------------+---------- + public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user=arwdDxt/cluster_super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user +| | + | | | default_perm_user=arwdDxt/cluster_super_user | | +(1 row) + +SELECT * FROM CHUNK_QRY1 WHERE schema = 'public'; + schema | name | access_privileges +--------+--------------+----------------------------------------------- + public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user=arwdDxt/cluster_super_user + public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user=arwdDxt/cluster_super_user + public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user=arwdDxt/cluster_super_user +(3 rows) + +-- Check on one datanode, should be the same on others as well +\c :DATA_NODE_3 :ROLE_CLUSTER_SUPERUSER; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+-----------------------------------------------+-------------------+---------- + public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user=arwdDxt/cluster_super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user +| | + | | | default_perm_user=arwdDxt/cluster_super_user | | +(1 row) + +SELECT * FROM CHUNK_QRY1 WHERE schema = 'public'; + schema | name | access_privileges +--------+--------------+----------------------------------------------- + public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+ + | | default_perm_user=arwdDxt/cluster_super_user +(1 row) + +-- REVOKE ALL and check privileges +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+-----------------------------------------------+-------------------+---------- + public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user | | +(1 
row) + +SELECT * FROM CHUNK_QRY1 WHERE schema = 'public'; + schema | name | access_privileges +--------+--------------+----------------------------------------------- + public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user + public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user + public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user +(3 rows) + +-- Check on one datanode, should be the same on others as well +\c :DATA_NODE_4 :ROLE_CLUSTER_SUPERUSER; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+-----------------------------------------------+-------------------+---------- + public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------------+-------------------+---------- + public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| | + | | | default_perm_user_2=a/cluster_super_user | | +(1 row) + +SELECT * FROM CHUNK_QRY1 WHERE schema = 'public'; + schema | name | access_privileges +--------+------+------------------- +(0 rows) + +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- Test GRANT/REVOKE on DATABASE +SET client_min_messages TO DEBUG1; +-- Test GRANT/REVOKE command being deparsed with the database name and +-- being propagated to the data nodes +GRANT ALL ON DATABASE :TEST_DBNAME TO :ROLE_CLUSTER_SUPERUSER; +LOG: statement: GRANT ALL ON DATABASE db_dist_grant TO cluster_super_user; +DEBUG: [db_dist_grant_1]: GRANT ALL ON DATABASE db_dist_grant_1 TO cluster_super_user +DEBUG: [db_dist_grant_2]: GRANT ALL ON DATABASE db_dist_grant_2 TO cluster_super_user +DEBUG: [db_dist_grant_3]: GRANT ALL ON DATABASE db_dist_grant_3 TO cluster_super_user +DEBUG: [data4]: GRANT ALL ON DATABASE db_dist_grant_4 TO cluster_super_user +GRANT TEMP ON DATABASE :TEST_DBNAME TO :ROLE_CLUSTER_SUPERUSER; +LOG: statement: GRANT TEMP ON DATABASE db_dist_grant TO cluster_super_user; +DEBUG: [db_dist_grant_1]: GRANT temp ON DATABASE db_dist_grant_1 TO cluster_super_user +DEBUG: [db_dist_grant_2]: GRANT temp ON DATABASE db_dist_grant_2 TO cluster_super_user +DEBUG: [db_dist_grant_3]: GRANT temp ON DATABASE db_dist_grant_3 TO cluster_super_user +DEBUG: [data4]: GRANT temp ON DATABASE db_dist_grant_4 TO cluster_super_user +GRANT TEMP, TEMPORARY ON DATABASE :TEST_DBNAME TO :ROLE_CLUSTER_SUPERUSER; +LOG: statement: GRANT TEMP, TEMPORARY ON DATABASE db_dist_grant TO cluster_super_user; +DEBUG: [db_dist_grant_1]: GRANT temp, temporary ON DATABASE db_dist_grant_1 TO cluster_super_user +DEBUG: [db_dist_grant_2]: GRANT temp, temporary ON DATABASE db_dist_grant_2 TO cluster_super_user +DEBUG: [db_dist_grant_3]: GRANT temp, temporary ON DATABASE db_dist_grant_3 TO cluster_super_user +DEBUG: [data4]: GRANT temp, temporary ON DATABASE db_dist_grant_4 TO cluster_super_user +GRANT TEMP, TEMPORARY ON DATABASE :TEST_DBNAME TO :ROLE_CLUSTER_SUPERUSER, :ROLE_DEFAULT_PERM_USER; +LOG: statement: GRANT TEMP, TEMPORARY ON DATABASE db_dist_grant TO cluster_super_user, default_perm_user; +DEBUG: [db_dist_grant_1]: GRANT temp, temporary ON DATABASE db_dist_grant_1 TO cluster_super_user, default_perm_user +DEBUG: [db_dist_grant_2]: GRANT temp, temporary ON DATABASE db_dist_grant_2 TO cluster_super_user, default_perm_user +DEBUG: [db_dist_grant_3]: GRANT temp, temporary ON 
DATABASE db_dist_grant_3 TO cluster_super_user, default_perm_user +DEBUG: [data4]: GRANT temp, temporary ON DATABASE db_dist_grant_4 TO cluster_super_user, default_perm_user +GRANT TEMP ON DATABASE :TEST_DBNAME TO :ROLE_CLUSTER_SUPERUSER WITH GRANT OPTION; +LOG: statement: GRANT TEMP ON DATABASE db_dist_grant TO cluster_super_user WITH GRANT OPTION; +DEBUG: [db_dist_grant_1]: GRANT temp ON DATABASE db_dist_grant_1 TO cluster_super_user WITH GRANT OPTION +DEBUG: [db_dist_grant_2]: GRANT temp ON DATABASE db_dist_grant_2 TO cluster_super_user WITH GRANT OPTION +DEBUG: [db_dist_grant_3]: GRANT temp ON DATABASE db_dist_grant_3 TO cluster_super_user WITH GRANT OPTION +DEBUG: [data4]: GRANT temp ON DATABASE db_dist_grant_4 TO cluster_super_user WITH GRANT OPTION +REVOKE TEMP ON DATABASE :TEST_DBNAME FROM :ROLE_CLUSTER_SUPERUSER; +LOG: statement: REVOKE TEMP ON DATABASE db_dist_grant FROM cluster_super_user; +DEBUG: [db_dist_grant_1]: REVOKE temp ON DATABASE db_dist_grant_1 FROM cluster_super_user +DEBUG: [db_dist_grant_2]: REVOKE temp ON DATABASE db_dist_grant_2 FROM cluster_super_user +DEBUG: [db_dist_grant_3]: REVOKE temp ON DATABASE db_dist_grant_3 FROM cluster_super_user +DEBUG: [data4]: REVOKE temp ON DATABASE db_dist_grant_4 FROM cluster_super_user +REVOKE ALL ON DATABASE :TEST_DBNAME FROM :ROLE_CLUSTER_SUPERUSER; +LOG: statement: REVOKE ALL ON DATABASE db_dist_grant FROM cluster_super_user; +DEBUG: [db_dist_grant_1]: REVOKE ALL ON DATABASE db_dist_grant_1 FROM cluster_super_user +DEBUG: [db_dist_grant_2]: REVOKE ALL ON DATABASE db_dist_grant_2 FROM cluster_super_user +DEBUG: [db_dist_grant_3]: REVOKE ALL ON DATABASE db_dist_grant_3 FROM cluster_super_user +DEBUG: [data4]: REVOKE ALL ON DATABASE db_dist_grant_4 FROM cluster_super_user +REVOKE ALL ON DATABASE :TEST_DBNAME FROM :ROLE_CLUSTER_SUPERUSER CASCADE; +LOG: statement: REVOKE ALL ON DATABASE db_dist_grant FROM cluster_super_user CASCADE; +DEBUG: [db_dist_grant_1]: REVOKE ALL ON DATABASE db_dist_grant_1 FROM cluster_super_user CASCADE +DEBUG: [db_dist_grant_2]: REVOKE ALL ON DATABASE db_dist_grant_2 FROM cluster_super_user CASCADE +DEBUG: [db_dist_grant_3]: REVOKE ALL ON DATABASE db_dist_grant_3 FROM cluster_super_user CASCADE +DEBUG: [data4]: REVOKE ALL ON DATABASE db_dist_grant_4 FROM cluster_super_user CASCADE +REVOKE ALL ON DATABASE :TEST_DBNAME FROM :ROLE_CLUSTER_SUPERUSER RESTRICT; +LOG: statement: REVOKE ALL ON DATABASE db_dist_grant FROM cluster_super_user RESTRICT; +DEBUG: [db_dist_grant_1]: REVOKE ALL ON DATABASE db_dist_grant_1 FROM cluster_super_user +DEBUG: [db_dist_grant_2]: REVOKE ALL ON DATABASE db_dist_grant_2 FROM cluster_super_user +DEBUG: [db_dist_grant_3]: REVOKE ALL ON DATABASE db_dist_grant_3 FROM cluster_super_user +DEBUG: [data4]: REVOKE ALL ON DATABASE db_dist_grant_4 FROM cluster_super_user +-- Grant to specific role types +GRANT TEMP, TEMPORARY ON DATABASE :TEST_DBNAME TO PUBLIC; +LOG: statement: GRANT TEMP, TEMPORARY ON DATABASE db_dist_grant TO PUBLIC; +DEBUG: [db_dist_grant_1]: GRANT temp, temporary ON DATABASE db_dist_grant_1 TO PUBLIC +DEBUG: [db_dist_grant_2]: GRANT temp, temporary ON DATABASE db_dist_grant_2 TO PUBLIC +DEBUG: [db_dist_grant_3]: GRANT temp, temporary ON DATABASE db_dist_grant_3 TO PUBLIC +DEBUG: [data4]: GRANT temp, temporary ON DATABASE db_dist_grant_4 TO PUBLIC +GRANT TEMP, TEMPORARY ON DATABASE :TEST_DBNAME TO CURRENT_USER; +LOG: statement: GRANT TEMP, TEMPORARY ON DATABASE db_dist_grant TO CURRENT_USER; +DEBUG: [db_dist_grant_1]: GRANT temp, temporary ON DATABASE db_dist_grant_1 TO 
CURRENT_USER +DEBUG: [db_dist_grant_2]: GRANT temp, temporary ON DATABASE db_dist_grant_2 TO CURRENT_USER +DEBUG: [db_dist_grant_3]: GRANT temp, temporary ON DATABASE db_dist_grant_3 TO CURRENT_USER +DEBUG: [data4]: GRANT temp, temporary ON DATABASE db_dist_grant_4 TO CURRENT_USER +GRANT TEMP, TEMPORARY ON DATABASE :TEST_DBNAME TO SESSION_USER, :ROLE_CLUSTER_SUPERUSER; +LOG: statement: GRANT TEMP, TEMPORARY ON DATABASE db_dist_grant TO SESSION_USER, cluster_super_user; +DEBUG: [db_dist_grant_1]: GRANT temp, temporary ON DATABASE db_dist_grant_1 TO SESSION_USER, cluster_super_user +DEBUG: [db_dist_grant_2]: GRANT temp, temporary ON DATABASE db_dist_grant_2 TO SESSION_USER, cluster_super_user +DEBUG: [db_dist_grant_3]: GRANT temp, temporary ON DATABASE db_dist_grant_3 TO SESSION_USER, cluster_super_user +DEBUG: [data4]: GRANT temp, temporary ON DATABASE db_dist_grant_4 TO SESSION_USER, cluster_super_user +-- PG14 added support for CURRENT_ROLE +\set ON_ERROR_STOP 0 +GRANT TEMP, TEMPORARY ON DATABASE :TEST_DBNAME TO CURRENT_ROLE; +LOG: statement: GRANT TEMP, TEMPORARY ON DATABASE db_dist_grant TO CURRENT_ROLE; +DEBUG: [db_dist_grant_1]: GRANT temp, temporary ON DATABASE db_dist_grant_1 TO CURRENT_ROLE +DEBUG: [db_dist_grant_2]: GRANT temp, temporary ON DATABASE db_dist_grant_2 TO CURRENT_ROLE +DEBUG: [db_dist_grant_3]: GRANT temp, temporary ON DATABASE db_dist_grant_3 TO CURRENT_ROLE +DEBUG: [data4]: GRANT temp, temporary ON DATABASE db_dist_grant_4 TO CURRENT_ROLE +\set ON_ERROR_STOP 1 +-- Grant on other database should not be propagated +GRANT CREATE ON DATABASE :DATA_NODE_1 TO :ROLE_CLUSTER_SUPERUSER; +LOG: statement: GRANT CREATE ON DATABASE db_dist_grant_1 TO cluster_super_user; +-- Prevent mixing databases +\set ON_ERROR_STOP 0 +GRANT CREATE ON DATABASE :TEST_DBNAME, :DATA_NODE_1 TO :ROLE_CLUSTER_SUPERUSER; +LOG: statement: GRANT CREATE ON DATABASE db_dist_grant, db_dist_grant_1 TO cluster_super_user; +ERROR: cannot change privileges on multiple databases +\set ON_ERROR_STOP 1 +-- Test disabling DDL commands on global objects +SET timescaledb_experimental.enable_distributed_ddl TO 'off'; +LOG: statement: SET timescaledb_experimental.enable_distributed_ddl TO 'off'; +-- ALTER DEFAULT PRIVILEGES +ALTER DEFAULT PRIVILEGES GRANT INSERT ON TABLES TO :ROLE_1; +LOG: statement: ALTER DEFAULT PRIVILEGES GRANT INSERT ON TABLES TO test_role_1; +DEBUG: skipping dist DDL on object: ALTER DEFAULT PRIVILEGES GRANT INSERT ON TABLES TO test_role_1; +-- GRANT/REVOKE +REVOKE ALL ON DATABASE :TEST_DBNAME FROM :ROLE_CLUSTER_SUPERUSER; +LOG: statement: REVOKE ALL ON DATABASE db_dist_grant FROM cluster_super_user; +DEBUG: skipping dist DDL on object: REVOKE ALL ON DATABASE db_dist_grant FROM cluster_super_user; +GRANT ALL ON DATABASE :TEST_DBNAME TO :ROLE_CLUSTER_SUPERUSER; +LOG: statement: GRANT ALL ON DATABASE db_dist_grant TO cluster_super_user; +DEBUG: skipping dist DDL on object: GRANT ALL ON DATABASE db_dist_grant TO cluster_super_user; +REVOKE ALL ON SCHEMA public FROM :ROLE_DEFAULT_PERM_USER; +LOG: statement: REVOKE ALL ON SCHEMA public FROM default_perm_user; +DEBUG: skipping dist DDL on object: REVOKE ALL ON SCHEMA public FROM default_perm_user; +GRANT ALL ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +LOG: statement: GRANT ALL ON SCHEMA public TO default_perm_user; +DEBUG: skipping dist DDL on object: GRANT ALL ON SCHEMA public TO default_perm_user; +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER; +LOG: statement: REVOKE ALL ON ALL TABLES IN SCHEMA public FROM 
default_perm_user; +DEBUG: skipping dist DDL on object: REVOKE ALL ON ALL TABLES IN SCHEMA public FROM default_perm_user; +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +LOG: statement: GRANT ALL ON ALL TABLES IN SCHEMA public TO default_perm_user; +DEBUG: skipping dist DDL on object: GRANT ALL ON ALL TABLES IN SCHEMA public TO default_perm_user; +SET timescaledb_experimental.enable_distributed_ddl TO 'on'; +LOG: statement: SET timescaledb_experimental.enable_distributed_ddl TO 'on'; +RESET client_min_messages; +LOG: statement: RESET client_min_messages; +-- Test GRANT on foreign server and data node authentication using a +-- user mapping +SET ROLE :ROLE_3; +SELECT current_user; + current_user +-------------- + test_role_3 +(1 row) + +CREATE TABLE disttable_role_3(time timestamptz, device int, temp float); +\set ON_ERROR_STOP 0 +-- Can't create distributed hypertable without GRANTs on foreign servers (data nodes) +SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +ERROR: permission denied for foreign server db_dist_grant_1 +\set ON_ERROR_STOP 1 +-- Grant USAGE on DATA_NODE_1 (but it is not enough) +RESET ROLE; +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1 TO :ROLE_3; +GRANT CREATE ON SCHEMA public TO :ROLE_3; +SET ROLE :ROLE_3; +\set ON_ERROR_STOP 0 +SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +ERROR: permission denied for foreign server db_dist_grant_2 +\set ON_ERROR_STOP 1 +-- Creating the hypertable should work with GRANTs on both servers. +RESET ROLE; +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_2 TO :ROLE_3; +GRANT CREATE ON SCHEMA public TO :ROLE_3; +SET ROLE :ROLE_3; +\set ON_ERROR_STOP 0 +-- Still cannot connect since there is no password in the passfile and +-- no user mapping. +SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +NOTICE: adding not-null constraint to column "time" +ERROR: could not connect to "db_dist_grant_1" +\set ON_ERROR_STOP 1 +RESET ROLE; +CREATE USER MAPPING FOR :ROLE_3 SERVER :DATA_NODE_1 OPTIONS (user :'ROLE_3', password :'ROLE_3_PASS'); +SET ROLE :ROLE_3; +\set ON_ERROR_STOP 0 +-- Still cannot connect since there is only a user mapping for data +-- node DATA_NODE_1. +SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +NOTICE: adding not-null constraint to column "time" +ERROR: could not connect to "db_dist_grant_2" +\set ON_ERROR_STOP 1 +RESET ROLE; +-- Create user mapping for ROLE_3, but don't specify user in +-- options. The "current user" will instead be used when connecting. +CREATE USER MAPPING FOR :ROLE_3 SERVER :DATA_NODE_2 OPTIONS (password :'ROLE_3_PASS'); +SET ROLE :ROLE_3; +-- User should be able to connect and create the distributed +-- hypertable at this point. 
+SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+------------------+--------- + 9 | public | disttable_role_3 | t +(1 row) + +-- Test insert and query +INSERT INTO disttable_role_3 VALUES ('2019-01-01 00:00:00', 1, 23.4); +SELECT * FROM disttable_role_3; + time | device | temp +------------------------------+--------+------ + Tue Jan 01 00:00:00 2019 PST | 1 | 23.4 +(1 row) + +DROP USER MAPPING FOR :ROLE_3 SERVER :DATA_NODE_1; +DROP USER MAPPING FOR :ROLE_3 SERVER :DATA_NODE_2; +-- Test altering default privileges +RESET ROLE; +-- Should be superuser +SELECT current_user; + current_user +-------------------- + cluster_super_user +(1 row) + +CALL distributed_exec($$ CREATE TABLE nodefprivs (time timestamptz, value int) $$); +SET ROLE :ROLE_1; +\set ON_ERROR_STOP 0 +-- Should fail due to lack of privileges (only insert on one data node +-- to make error reporting deterministic) +CALL distributed_exec($$ INSERT INTO nodefprivs VALUES ('2019-01-01 00:00:00', 1) $$, ARRAY[:'DATA_NODE_1']); +ERROR: [db_dist_grant_1]: permission denied for table nodefprivs +\set ON_ERROR_STOP 1 +-- Reset to super user +RESET ROLE; +-- Now alter default privileges and create table +ALTER DEFAULT PRIVILEGES GRANT INSERT ON TABLES TO :ROLE_1; +SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ + SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl +$$); +NOTICE: [db_dist_grant_1]: + SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl + +NOTICE: [db_dist_grant_1]: +defaclrole |defaclobjtype|defaclacl +------------------+-------------+-------------------------------------------------------------------------------- +cluster_super_user|r |{cluster_super_user=arwdDxt/cluster_super_user,test_role_1=a/cluster_super_user} +(1 row) + + +-- +(1 row) + +CALL distributed_exec($$ CREATE TABLE defprivs (time timestamptz, value int) $$); +-- Switch to the role that was granted default privileges +SET ROLE :ROLE_1; +-- Should succeed since user will have insert privileges by default +CALL distributed_exec($$ INSERT INTO defprivs VALUES ('2019-01-01 00:00:00', 1) $$); +RESET ROLE; +ALTER DEFAULT PRIVILEGES REVOKE INSERT ON TABLES FROM :ROLE_1; +-- No default privileges remain +SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ + SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl +$$); +NOTICE: [db_dist_grant_1]: + SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl + +NOTICE: [db_dist_grant_1]: +defaclrole|defaclobjtype|defaclacl +----------+-------------+--------- +(0 rows) + + +-- +(1 row) + +CALL distributed_exec($$ DROP TABLE defprivs $$); +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); +DROP DATABASE :DATA_NODE_4 WITH (FORCE); diff --git a/tsl/test/expected/dist_hypertable-16.out b/tsl/test/expected/dist_hypertable-16.out new file mode 100644 index 00000000000..23f808ff7ba --- /dev/null +++ b/tsl/test/expected/dist_hypertable-16.out @@ -0,0 +1,6228 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- Need to be super user to create extension and add data nodes +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +\unset ECHO +psql:include/filter_exec.sql:5: NOTICE: schema "test" already exists, skipping +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\set DATA_NODE_4 :TEST_DBNAME _4 +\set TABLESPACE_1 :TEST_DBNAME _1 +\set TABLESPACE_2 :TEST_DBNAME _2 +SELECT + test.make_tablespace_path(:'TEST_TABLESPACE1_PREFIX', :'TEST_DBNAME') AS spc1path, + test.make_tablespace_path(:'TEST_TABLESPACE2_PREFIX', :'TEST_DBNAME') AS spc2path +\gset +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +----------------------+----------------------+--------------+------------------+------------------- + db_dist_hypertable_1 | db_dist_hypertable_1 | t | t | t + db_dist_hypertable_2 | db_dist_hypertable_2 | t | t | t + db_dist_hypertable_3 | db_dist_hypertable_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- View to see dimension partitions. Note RIGHT JOIN to see that +-- dimension partitions are cleaned up (deleted) properly. +CREATE VIEW hypertable_partitions AS +SELECT table_name, dimension_id, range_start, data_nodes +FROM _timescaledb_catalog.hypertable h +INNER JOIN _timescaledb_catalog.dimension d ON (d.hypertable_id = h.id) +RIGHT JOIN _timescaledb_catalog.dimension_partition dp ON (dp.dimension_id = d.id) +ORDER BY dimension_id, range_start; +GRANT SELECT ON hypertable_partitions TO :ROLE_1; +-- Import testsupport.sql file to data nodes +\unset ECHO +GRANT CREATE ON SCHEMA public TO :ROLE_1; +SET ROLE :ROLE_1; +--Ensure INSERTs use DataNodeDispatch. DataNodeCopy is tested later +SET timescaledb.enable_distributed_insert_with_copy=false; +-- Verify lack of tables +SELECT node_name FROM timescaledb_information.data_nodes ORDER BY node_name; + node_name +---------------------- + db_dist_hypertable_1 + db_dist_hypertable_2 + db_dist_hypertable_3 +(3 rows) + +\set ON_ERROR_STOP 0 +-- Test that one cannot directly create TimescaleDB foreign tables +CREATE FOREIGN TABLE foreign_table (time timestamptz, device int, temp float) SERVER :DATA_NODE_1; +ERROR: operation not supported +\set ON_ERROR_STOP 1 +-- Create distributed hypertables. Add a trigger and primary key +-- constraint to test how those work +CREATE TABLE disttable(time timestamptz, device int CHECK (device > 0), color int, temp float, PRIMARY KEY (time,device)); +SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 1); +WARNING: insufficient number of partitions for dimension "device" + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | disttable | t +(1 row) + +-- Increase the number of partitions. Expect warning since still too +-- low. Dimension partitions should be updated to reflect new +-- partitioning. 
+SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} +(1 row) + +SELECT * FROM set_number_partitions('disttable', 2); +WARNING: insufficient number of partitions for dimension "device" + set_number_partitions +----------------------- + +(1 row) + +SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 1073741823 | {db_dist_hypertable_2} +(2 rows) + +-- Set number of partitions equal to the number of servers should not +-- raise a warning. +SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 1073741823 | {db_dist_hypertable_2} +(2 rows) + +SELECT * FROM set_number_partitions('disttable', 3, 'device'); + set_number_partitions +----------------------- + +(1 row) + +SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 715827882 | {db_dist_hypertable_2} + disttable | 2 | 1431655764 | {db_dist_hypertable_3} +(3 rows) + +-- Show the number of slices +SELECT h.table_name, d.column_name, d.num_slices +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id +AND h.table_name = 'disttable'; + table_name | column_name | num_slices +------------+-------------+------------ + disttable | device | 3 + disttable | time | +(2 rows) + +-- This table tests both 1-dimensional tables and under-replication +-- (replication_factor > num_data_nodes). +CREATE TABLE underreplicated(time timestamptz, device int, temp float); +\set ON_ERROR_STOP 0 +-- can't create an under-replicated hypertable +SELECT * FROM create_hypertable('underreplicated', 'time', replication_factor => 4); +ERROR: replication factor too large for hypertable "underreplicated" +\set ON_ERROR_STOP 1 +RESET ROLE; +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node(:'DATA_NODE_4', host => 'localhost', database => :'DATA_NODE_4'); + node_name | database | node_created | database_created | extension_created +----------------------+----------------------+--------------+------------------+------------------- + db_dist_hypertable_4 | db_dist_hypertable_4 | t | t | t +(1 row) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_4 TO PUBLIC; +GRANT CREATE ON SCHEMA public TO :ROLE_1; +SET ROLE :ROLE_1; +SELECT * FROM create_hypertable('underreplicated', 'time', replication_factor => 4); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------+--------- + 2 | public | underreplicated | t +(1 row) + +-- test that attaching a data node to an existing hypertable with +-- repartition=>false does not change the number of partitions when +-- number of partitions is greater than number of data nodes. 
+SELECT * FROM set_number_partitions('disttable', 8, 'device'); + set_number_partitions +----------------------- + +(1 row) + +SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 268435455 | {db_dist_hypertable_2} + disttable | 2 | 536870910 | {db_dist_hypertable_3} + disttable | 2 | 805306365 | {db_dist_hypertable_1} + disttable | 2 | 1073741820 | {db_dist_hypertable_2} + disttable | 2 | 1342177275 | {db_dist_hypertable_3} + disttable | 2 | 1610612730 | {db_dist_hypertable_1} + disttable | 2 | 1879048185 | {db_dist_hypertable_2} +(8 rows) + +SELECT attach_data_node(:'DATA_NODE_4', 'disttable', repartition => false); + attach_data_node +---------------------------- + (1,2,db_dist_hypertable_4) +(1 row) + +SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 268435455 | {db_dist_hypertable_2} + disttable | 2 | 536870910 | {db_dist_hypertable_3} + disttable | 2 | 805306365 | {db_dist_hypertable_4} + disttable | 2 | 1073741820 | {db_dist_hypertable_1} + disttable | 2 | 1342177275 | {db_dist_hypertable_2} + disttable | 2 | 1610612730 | {db_dist_hypertable_3} + disttable | 2 | 1879048185 | {db_dist_hypertable_4} +(8 rows) + +--create new session to clear out connections +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +SELECT * FROM delete_data_node(:'DATA_NODE_4', force => true, drop_database => true, repartition => false); +WARNING: insufficient number of data nodes for distributed hypertable "underreplicated" + delete_data_node +------------------ + t +(1 row) + +SET ROLE :ROLE_1; +-- Deleting a data node should also not change the number of +-- partitions with repartition=>false +SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 268435455 | {db_dist_hypertable_2} + disttable | 2 | 536870910 | {db_dist_hypertable_3} + disttable | 2 | 805306365 | {db_dist_hypertable_1} + disttable | 2 | 1073741820 | {db_dist_hypertable_2} + disttable | 2 | 1342177275 | {db_dist_hypertable_3} + disttable | 2 | 1610612730 | {db_dist_hypertable_1} + disttable | 2 | 1879048185 | {db_dist_hypertable_2} +(8 rows) + +-- reset to 3 partitions +SELECT * FROM set_number_partitions('disttable', 3, 'device'); + set_number_partitions +----------------------- + +(1 row) + +SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 715827882 | {db_dist_hypertable_2} + disttable | 2 | 1431655764 | {db_dist_hypertable_3} +(3 rows) + +CREATE OR REPLACE FUNCTION test_trigger() + RETURNS TRIGGER LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + cnt INTEGER; +BEGIN + SELECT count(*) INTO cnt FROM public.disttable; + RAISE WARNING 'FIRING trigger when: % level: % op: % cnt: % trigger_name %', + tg_when, tg_level, tg_op, cnt, tg_name; + + IF TG_OP = 
'DELETE' THEN + RETURN OLD; + END IF; + RETURN NEW; +END +$BODY$; +-- Create the trigger function on the data nodes: +CALL distributed_exec($$ +CREATE OR REPLACE FUNCTION test_trigger() + RETURNS TRIGGER LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + cnt INTEGER; +BEGIN + SELECT count(*) INTO cnt FROM public.disttable; + RAISE WARNING 'FIRING trigger when: % level: % op: % cnt: % trigger_name %', + tg_when, tg_level, tg_op, cnt, tg_name; + + IF TG_OP = 'DELETE' THEN + RETURN OLD; + END IF; + RETURN NEW; +END +$BODY$; +$$); +CREATE TRIGGER _0_test_trigger_insert + BEFORE INSERT ON disttable + FOR EACH ROW EXECUTE FUNCTION test_trigger(); +SELECT * FROM _timescaledb_catalog.hypertable_data_node ORDER BY 1,2,3; + hypertable_id | node_hypertable_id | node_name | block_chunks +---------------+--------------------+----------------------+-------------- + 1 | 1 | db_dist_hypertable_1 | f + 1 | 1 | db_dist_hypertable_2 | f + 1 | 1 | db_dist_hypertable_3 | f + 2 | 2 | db_dist_hypertable_1 | f + 2 | 2 | db_dist_hypertable_2 | f + 2 | 2 | db_dist_hypertable_3 | f +(6 rows) + +SELECT * FROM _timescaledb_catalog.chunk_data_node ORDER BY 1,2,3; + chunk_id | node_chunk_id | node_name +----------+---------------+----------- +(0 rows) + +-- The constraints, indexes, and triggers on the hypertable +SELECT * FROM test.show_constraints('disttable'); + Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated +------------------------+------+---------------+----------------+--------------+------------+----------+----------- + disttable_device_check | c | {device} | - | (device > 0) | f | f | t + disttable_pkey | p | {time,device} | disttable_pkey | | f | f | t +(2 rows) + +SELECT * FROM test.show_indexes('disttable'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +---------------------------+---------------+------+--------+---------+-----------+------------ + disttable_device_time_idx | {device,time} | | f | f | f | + disttable_pkey | {time,device} | | t | t | f | + disttable_time_idx | {time} | | f | f | f | +(3 rows) + +SELECT * FROM test.show_triggers('disttable'); + Trigger | Type | Function +------------------------+------+--------------------------------------- + _0_test_trigger_insert | 7 | test_trigger + ts_insert_blocker | 7 | _timescaledb_functions.insert_blocker +(2 rows) + +-- Drop a column. This will make the attribute numbers of the +-- hypertable's root relation differ from newly created chunks. 
It is +-- a way to test that we properly handle attributed conversion between +-- the root table and chunks +ALTER TABLE disttable DROP COLUMN color; +-- EXPLAIN some inserts to see what plans and explain output for +-- remote inserts look like +EXPLAIN (COSTS FALSE) +INSERT INTO disttable VALUES + ('2017-01-01 06:01', 1, 1.1); + QUERY PLAN +----------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable disttable + -> Insert on disttable + -> Custom Scan (DataNodeCopy) + -> Custom Scan (ChunkDispatch) + -> Result +(6 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +INSERT INTO disttable VALUES + ('2017-01-01 06:01', 1, 1.1); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable public.disttable + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + -> Insert on public.disttable + -> Custom Scan (DataNodeCopy) + Output: 'Sun Jan 01 06:01:00 2017 PST'::timestamp with time zone, 1, NULL::integer, '1.1'::double precision + Remote SQL: COPY public.disttable ("time", device, temp) FROM STDIN WITH (FORMAT binary) + -> Custom Scan (ChunkDispatch) + Output: 'Sun Jan 01 06:01:00 2017 PST'::timestamp with time zone, 1, NULL::integer, '1.1'::double precision + -> Result + Output: 'Sun Jan 01 06:01:00 2017 PST'::timestamp with time zone, 1, NULL::integer, '1.1'::double precision +(11 rows) + +-- Create some chunks through insertion +INSERT INTO disttable VALUES + ('2017-01-01 06:01', 1, 1.1), + ('2017-01-01 09:11', 3, 2.1), + ('2017-01-01 09:21', 3, 2.2), + ('2017-01-01 08:11', 3, 2.3), + ('2017-01-01 08:01', 1, 1.2), + ('2017-01-02 08:01', 2, 1.3), + ('2017-01-02 09:01', 2, 1.4), + ('2017-01-02 08:21', 2, 1.5), + ('2018-07-02 08:01', 87, 1.6), + ('2018-07-02 09:01', 87, 1.4), + ('2018-07-02 09:21', 87, 1.8), + ('2018-07-01 06:01', 13, 1.4), + ('2018-07-01 06:21', 13, 1.5), + ('2018-07-01 07:01', 13, 1.4), + ('2018-07-01 09:11', 90, 2.7), + ('2018-07-01 08:01', 29, 1.5), + ('2018-07-01 09:21', 90, 2.8), + ('2018-07-01 08:21', 29, 1.2); +-- EXPLAIN some updates/deletes to see what plans and explain output for +-- remote operations look like +EXPLAIN (VERBOSE, COSTS FALSE) +UPDATE disttable SET temp = 3.7 WHERE device = 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Update on public.disttable + Update on public.disttable disttable_1 + Foreign Update on _timescaledb_internal._dist_hyper_1_1_chunk disttable_2 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_1_1_chunk SET temp = $2 WHERE ctid = $1 + Foreign Update on _timescaledb_internal._dist_hyper_1_4_chunk disttable_3 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_1_4_chunk SET temp = $2 WHERE ctid = $1 + -> Result + Output: '3.7'::double precision, disttable.tableoid, disttable.ctid, (NULL::record) + -> Append + -> Seq Scan on public.disttable disttable_1 + Output: disttable_1.tableoid, disttable_1.ctid, NULL::record + Filter: (disttable_1.device = 1) + -> Foreign Scan on _timescaledb_internal._dist_hyper_1_1_chunk disttable_2 + Output: disttable_2.tableoid, disttable_2.ctid, disttable_2.* + Data node: db_dist_hypertable_1 + Remote SQL: SELECT "time", device, temp, ctid FROM _timescaledb_internal._dist_hyper_1_1_chunk WHERE ((device = 1)) + -> Foreign Scan on 
_timescaledb_internal._dist_hyper_1_4_chunk disttable_3 + Output: disttable_3.tableoid, disttable_3.ctid, disttable_3.* + Data node: db_dist_hypertable_1 + Remote SQL: SELECT "time", device, temp, ctid FROM _timescaledb_internal._dist_hyper_1_4_chunk WHERE ((device = 1)) +(20 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +DELETE FROM disttable WHERE device = 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Delete on public.disttable + Delete on public.disttable disttable_1 + Foreign Delete on _timescaledb_internal._dist_hyper_1_1_chunk disttable_2 + Remote SQL: DELETE FROM _timescaledb_internal._dist_hyper_1_1_chunk WHERE ctid = $1 + Foreign Delete on _timescaledb_internal._dist_hyper_1_4_chunk disttable_3 + Remote SQL: DELETE FROM _timescaledb_internal._dist_hyper_1_4_chunk WHERE ctid = $1 + -> Append + -> Seq Scan on public.disttable disttable_1 + Output: disttable_1.tableoid, disttable_1.ctid + Filter: (disttable_1.device = 1) + -> Foreign Scan on _timescaledb_internal._dist_hyper_1_1_chunk disttable_2 + Output: disttable_2.tableoid, disttable_2.ctid + Data node: db_dist_hypertable_1 + Remote SQL: SELECT ctid FROM _timescaledb_internal._dist_hyper_1_1_chunk WHERE ((device = 1)) + -> Foreign Scan on _timescaledb_internal._dist_hyper_1_4_chunk disttable_3 + Output: disttable_3.tableoid, disttable_3.ctid + Data node: db_dist_hypertable_1 + Remote SQL: SELECT ctid FROM _timescaledb_internal._dist_hyper_1_4_chunk WHERE ((device = 1)) +(18 rows) + +-- Test distributed ANALYZE. +-- +-- First show no statistics +-- reltuples is initially -1 before any VACUUM/ANALYZE has been run on PG14 +SELECT relname, relkind, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END AS reltuples, relpages +FROM pg_class +WHERE oid = 'disttable'::regclass; + relname | relkind | reltuples | relpages +-----------+---------+-----------+---------- + disttable | r | 0 | 0 +(1 row) + +SELECT relname, relkind, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END AS reltuples, relpages +FROM pg_class cl, (SELECT show_chunks AS chunk FROM show_chunks('disttable')) ch +WHERE cl.oid = ch.chunk::regclass; + relname | relkind | reltuples | relpages +-----------------------+---------+-----------+---------- + _dist_hyper_1_1_chunk | f | 0 | 0 + _dist_hyper_1_2_chunk | f | 0 | 0 + _dist_hyper_1_3_chunk | f | 0 | 0 + _dist_hyper_1_4_chunk | f | 0 | 0 + _dist_hyper_1_5_chunk | f | 0 | 0 + _dist_hyper_1_6_chunk | f | 0 | 0 +(6 rows) + +ANALYZE disttable; +-- Show updated statistics +SELECT relname, relkind, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END AS reltuples, relpages +FROM pg_class +WHERE oid = 'disttable'::regclass; + relname | relkind | reltuples | relpages +-----------+---------+-----------+---------- + disttable | r | 0 | 0 +(1 row) + +SELECT relname, relkind, reltuples, relpages +FROM pg_class cl, (SELECT show_chunks AS chunk FROM show_chunks('disttable')) ch +WHERE cl.oid = ch.chunk::regclass; + relname | relkind | reltuples | relpages +-----------------------+---------+-----------+---------- + _dist_hyper_1_1_chunk | f | 2 | 1 + _dist_hyper_1_2_chunk | f | 3 | 1 + _dist_hyper_1_3_chunk | f | 3 | 1 + _dist_hyper_1_4_chunk | f | 3 | 1 + _dist_hyper_1_5_chunk | f | 3 | 1 + _dist_hyper_1_6_chunk | f | 4 | 1 +(6 rows) + +-- Test distributed VACUUM. 
+-- +VACUUM (FULL, ANALYZE) disttable; +VACUUM FULL disttable; +VACUUM disttable; +\set ON_ERROR_STOP 0 +-- VACUUM VERBOSE is not supported at the moment +VACUUM VERBOSE disttable; +ERROR: operation not supported on distributed hypertable +\set ON_ERROR_STOP 1 +-- Test prepared statement +PREPARE dist_insert (timestamptz, int, float) AS +INSERT INTO disttable VALUES ($1, $2, $3); +EXECUTE dist_insert ('2017-01-01 06:05', 1, 1.4); +-- Show chunks created +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable'); + chunk_id | hypertable_id | schema_name | table_name | relkind | slices +----------+---------------+-----------------------+-----------------------+---------+--------------------------------------------------------------------------------------------- + 1 | 1 | _timescaledb_internal | _dist_hyper_1_1_chunk | f | {"time": [1482969600000000, 1483574400000000], "device": [-9223372036854775808, 715827882]} + 2 | 1 | _timescaledb_internal | _dist_hyper_1_2_chunk | f | {"time": [1482969600000000, 1483574400000000], "device": [1431655764, 9223372036854775807]} + 3 | 1 | _timescaledb_internal | _dist_hyper_1_3_chunk | f | {"time": [1482969600000000, 1483574400000000], "device": [715827882, 1431655764]} + 4 | 1 | _timescaledb_internal | _dist_hyper_1_4_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 5 | 1 | _timescaledb_internal | _dist_hyper_1_5_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} + 6 | 1 | _timescaledb_internal | _dist_hyper_1_6_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(6 rows) + +-- Show that there are assigned node_chunk_id:s in chunk data node mappings +SELECT * FROM _timescaledb_catalog.chunk_data_node ORDER BY 1,2,3; + chunk_id | node_chunk_id | node_name +----------+---------------+---------------------- + 1 | 1 | db_dist_hypertable_1 + 2 | 1 | db_dist_hypertable_3 + 3 | 1 | db_dist_hypertable_2 + 4 | 2 | db_dist_hypertable_1 + 5 | 2 | db_dist_hypertable_2 + 6 | 2 | db_dist_hypertable_3 +(6 rows) + +SELECT * FROM hypertable_partitions; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + disttable | 2 | -9223372036854775808 | {db_dist_hypertable_1} + disttable | 2 | 715827882 | {db_dist_hypertable_2} + disttable | 2 | 1431655764 | {db_dist_hypertable_3} +(3 rows) + +-- Show that chunks are created on data nodes and that each data node +-- has their own unique slice in the space (device) dimension. 
+SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable'); +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+---------------------+-------+------------------------------------------------------------------------------------------- + 1| 1|_timescaledb_internal|_dist_hyper_1_1_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [-9223372036854775808, 715827882]} + 2| 1|_timescaledb_internal|_dist_hyper_1_4_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} +(2 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+---------------------+-------+--------------------------------------------------------------------------------- + 1| 1|_timescaledb_internal|_dist_hyper_1_3_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [715827882, 1431655764]} + 2| 1|_timescaledb_internal|_dist_hyper_1_5_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+---------------------+-------+------------------------------------------------------------------------------------------- + 1| 1|_timescaledb_internal|_dist_hyper_1_2_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [1431655764, 9223372036854775807]} + 2| 1|_timescaledb_internal|_dist_hyper_1_6_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(2 rows) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM disttable; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_1]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +Sun Jan 01 08:01:00 2017 PST| 1| 1.2 +Sun Jan 01 06:05:00 2017 PST| 1| 1.4 +Mon Jul 02 08:01:00 2018 PDT| 87| 1.6 +Mon Jul 02 09:01:00 2018 PDT| 87| 1.4 +Mon Jul 02 09:21:00 2018 PDT| 87| 1.8 +(6 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_2]: +time |device|temp +----------------------------+------+---- +Mon Jan 02 08:01:00 2017 PST| 2| 1.3 +Mon Jan 02 09:01:00 2017 PST| 2| 1.4 +Mon Jan 02 08:21:00 2017 PST| 2| 1.5 +Sun Jul 01 06:01:00 2018 PDT| 13| 1.4 +Sun Jul 01 06:21:00 2018 PDT| 13| 1.5 +Sun Jul 01 07:01:00 2018 PDT| 13| 1.4 +(6 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_3]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 09:11:00 2017 PST| 3| 2.1 +Sun Jan 01 09:21:00 2017 PST| 3| 2.2 +Sun Jan 01 08:11:00 2017 PST| 3| 2.3 +Sun Jul 01 09:11:00 2018 PDT| 90| 2.7 +Sun Jul 01 08:01:00 2018 PDT| 29| 1.5 +Sun Jul 01 
09:21:00 2018 PDT| 90| 2.8 +Sun Jul 01 08:21:00 2018 PDT| 29| 1.2 +(7 rows) + + + remote_exec +------------- + +(1 row) + +SELECT node_name FROM timescaledb_information.data_nodes ORDER BY node_name; + node_name +---------------------- + db_dist_hypertable_1 + db_dist_hypertable_2 + db_dist_hypertable_3 +(3 rows) + +SELECT * FROM hypertable_detailed_size('disttable') ORDER BY node_name; + table_bytes | index_bytes | toast_bytes | total_bytes | node_name +-------------+-------------+-------------+-------------+---------------------- + 81920 | 122880 | 0 | 204800 | db_dist_hypertable_1 + 81920 | 122880 | 0 | 204800 | db_dist_hypertable_2 + 81920 | 122880 | 0 | 204800 | db_dist_hypertable_3 + 0 | 24576 | 0 | 24576 | +(4 rows) + +-- Show what some queries would look like on the frontend +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT * FROM disttable; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: disttable."time", disttable.device, disttable.temp + -> Append + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1."time", disttable_1.device, disttable_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2."time", disttable_2.device, disttable_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3."time", disttable_3.device, disttable_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) +(18 rows) + +SELECT * FROM disttable; + time | device | temp +------------------------------+--------+------ + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 + Sun Jan 01 06:05:00 2017 PST | 1 | 1.4 + Mon Jul 02 08:01:00 2018 PDT | 87 | 1.6 + Mon Jul 02 09:01:00 2018 PDT | 87 | 1.4 + Mon Jul 02 09:21:00 2018 PDT | 87 | 1.8 + Mon Jan 02 08:01:00 2017 PST | 2 | 1.3 + Mon Jan 02 09:01:00 2017 PST | 2 | 1.4 + Mon Jan 02 08:21:00 2017 PST | 2 | 1.5 + Sun Jul 01 06:01:00 2018 PDT | 13 | 1.4 + Sun Jul 01 06:21:00 2018 PDT | 13 | 1.5 + Sun Jul 01 07:01:00 2018 PDT | 13 | 1.4 + Sun Jan 01 09:11:00 2017 PST | 3 | 2.1 + Sun Jan 01 09:21:00 2017 PST | 3 | 2.2 + Sun Jan 01 08:11:00 2017 PST | 3 | 2.3 + Sun Jul 01 09:11:00 2018 PDT | 90 | 2.7 + Sun Jul 01 08:01:00 2018 PDT | 29 | 1.5 + Sun Jul 01 09:21:00 2018 PDT | 90 | 2.8 + Sun Jul 01 08:21:00 2018 PDT | 29 | 1.2 +(19 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT time_bucket('3 hours', time) AS time, device, avg(temp) AS avg_temp +FROM disttable GROUP BY 1, 2 +ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: 
(time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device, avg(disttable.temp) + Group Key: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device, disttable.temp + -> Merge Append + Sort Key: (time_bucket('@ 3 hours'::interval, disttable_1."time")), disttable_1.device + -> Result + Output: time_bucket('@ 3 hours'::interval, disttable_1."time"), disttable_1.device, disttable_1.temp + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1."time", disttable_1.device, disttable_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: time_bucket('@ 3 hours'::interval, disttable_2."time"), disttable_2.device, disttable_2.temp + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2."time", disttable_2.device, disttable_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: time_bucket('@ 3 hours'::interval, disttable_3."time"), disttable_3.device, disttable_3.temp + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3."time", disttable_3.device, disttable_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(28 rows) + +-- Execute some queries on the frontend and return the results +SELECT * FROM disttable; + time | device | temp +------------------------------+--------+------ + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 + Sun Jan 01 06:05:00 2017 PST | 1 | 1.4 + Mon Jul 02 08:01:00 2018 PDT | 87 | 1.6 + Mon Jul 02 09:01:00 2018 PDT | 87 | 1.4 + Mon Jul 02 09:21:00 2018 PDT | 87 | 1.8 + Mon Jan 02 08:01:00 2017 PST | 2 | 1.3 + Mon Jan 02 09:01:00 2017 PST | 2 | 1.4 + Mon Jan 02 08:21:00 2017 PST | 2 | 1.5 + Sun Jul 01 06:01:00 2018 PDT | 13 | 1.4 + Sun Jul 01 06:21:00 2018 PDT | 13 | 1.5 + Sun Jul 01 07:01:00 2018 PDT | 13 | 1.4 + Sun Jan 01 09:11:00 2017 PST | 3 | 2.1 + Sun Jan 01 09:21:00 2017 PST | 3 | 2.2 + Sun Jan 01 08:11:00 2017 PST | 3 | 2.3 + Sun Jul 01 09:11:00 2018 PDT | 90 | 2.7 + Sun Jul 01 08:01:00 2018 PDT | 29 | 1.5 + Sun Jul 01 09:21:00 2018 PDT | 90 | 2.8 + Sun Jul 01 08:21:00 2018 PDT | 29 | 1.2 +(19 rows) + +SELECT time_bucket('3 hours', time) AS time, device, avg(temp) AS avg_temp +FROM disttable +GROUP BY 1, 2 +ORDER BY 1; + time | device | avg_temp +------------------------------+--------+------------------ + Sun Jan 01 04:00:00 2017 PST | 1 | 1.25 + Sun Jan 01 07:00:00 2017 PST | 1 | 1.2 + Sun Jan 01 07:00:00 2017 PST | 3 | 2.2 + Mon Jan 02 07:00:00 2017 PST | 2 | 1.4 + Sun Jul 01 05:00:00 2018 PDT | 13 | 1.43333333333333 + Sun Jul 01 08:00:00 2018 PDT | 
29 | 1.35 + Sun Jul 01 08:00:00 2018 PDT | 90 | 2.75 + Mon Jul 02 08:00:00 2018 PDT | 87 | 1.6 +(8 rows) + +SELECT time_bucket('3 hours', time) AS time, device, avg(temp) AS avg_temp +FROM disttable GROUP BY 1, 2 +HAVING avg(temp) > 1.2 +ORDER BY 1; + time | device | avg_temp +------------------------------+--------+------------------ + Sun Jan 01 04:00:00 2017 PST | 1 | 1.25 + Sun Jan 01 07:00:00 2017 PST | 3 | 2.2 + Mon Jan 02 07:00:00 2017 PST | 2 | 1.4 + Sun Jul 01 05:00:00 2018 PDT | 13 | 1.43333333333333 + Sun Jul 01 08:00:00 2018 PDT | 29 | 1.35 + Sun Jul 01 08:00:00 2018 PDT | 90 | 2.75 + Mon Jul 02 08:00:00 2018 PDT | 87 | 1.6 +(7 rows) + +SELECT time_bucket('3 hours', time) AS time, device, avg(temp) AS avg_temp +FROM disttable +WHERE temp > 2 +GROUP BY 1, 2 +HAVING avg(temp) > 1.2 +ORDER BY 1; + time | device | avg_temp +------------------------------+--------+---------- + Sun Jan 01 07:00:00 2017 PST | 3 | 2.2 + Sun Jul 01 08:00:00 2018 PDT | 90 | 2.75 +(2 rows) + +-- Test AsyncAppend when using min/max aggregates +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT max(temp) +FROM disttable; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result + Output: $0 + InitPlan 1 (returns $0) + -> Limit + Output: disttable.temp + -> Custom Scan (AsyncAppend) + Output: disttable.temp + -> Merge Append + Sort Key: disttable_1.temp DESC + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 +(24 rows) + +SELECT max(temp) +FROM disttable; + max +----- + 2.8 +(1 row) + +-- Test turning off async append +SET timescaledb.enable_async_append = OFF; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT max(temp) +FROM disttable; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result + Output: $0 + InitPlan 1 (returns $0) + -> Limit + Output: disttable_1.temp + -> Merge Append + Sort Key: disttable_1.temp DESC + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS 
FIRST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 +(22 rows) + +SET timescaledb.enable_async_append = ON; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT min(temp), max(temp) +FROM disttable; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: min(disttable.temp), max(disttable.temp) + -> Custom Scan (AsyncAppend) + Output: disttable.temp + -> Append + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) +(20 rows) + +SELECT min(temp), max(temp) +FROM disttable; + min | max +-----+----- + 1.1 | 2.8 +(1 row) + +-- Test AsyncAppend when using window functions +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT device, temp, avg(temp) OVER (PARTITION BY device) +FROM disttable +ORDER BY device, temp; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: disttable.device, disttable.temp, (avg(disttable.temp) OVER (?)) + Sort Key: disttable.device, disttable.temp + -> WindowAgg + Output: disttable.device, disttable.temp, avg(disttable.temp) OVER (?) 
+ -> Custom Scan (AsyncAppend) + Output: disttable.device, disttable.temp + -> Merge Append + Sort Key: disttable_1.device + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1.device, disttable_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2.device, disttable_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3.device, disttable_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY device ASC NULLS LAST +(24 rows) + +SELECT device, temp, avg(temp) OVER (PARTITION BY device) +FROM disttable +ORDER BY device, temp; + device | temp | avg +--------+------+------------------ + 1 | 1.1 | 1.23333333333333 + 1 | 1.2 | 1.23333333333333 + 1 | 1.4 | 1.23333333333333 + 2 | 1.3 | 1.4 + 2 | 1.4 | 1.4 + 2 | 1.5 | 1.4 + 3 | 2.1 | 2.2 + 3 | 2.2 | 2.2 + 3 | 2.3 | 2.2 + 13 | 1.4 | 1.43333333333333 + 13 | 1.4 | 1.43333333333333 + 13 | 1.5 | 1.43333333333333 + 29 | 1.2 | 1.35 + 29 | 1.5 | 1.35 + 87 | 1.4 | 1.6 + 87 | 1.6 | 1.6 + 87 | 1.8 | 1.6 + 90 | 2.7 | 2.75 + 90 | 2.8 | 2.75 +(19 rows) + +-- Test remote explain +-- Make sure that chunks_in function only expects one-dimensional integer arrays +\set ON_ERROR_STOP 0 +SELECT "time" FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[[2], [1]]) +ORDER BY "time" DESC NULLS FIRST LIMIT 1; +ERROR: invalid number of array dimensions for chunks_in +\set ON_ERROR_STOP 1 +SET timescaledb.enable_remote_explain = ON; +-- Check that datanodes use ChunkAppend plans with chunks_in function in the +-- "Remote SQL" when using max(time). 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT max(time) +FROM disttable; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result + Output: $0 + InitPlan 1 (returns $0) + -> Limit + Output: disttable."time" + -> Custom Scan (AsyncAppend) + Output: disttable."time" + -> Merge Append + Sort Key: disttable_1."time" DESC + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1."time" + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_4_chunk, _dist_hyper_1_1_chunk + Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit + Output: disttable."time" + -> Custom Scan (ChunkAppend) on public.disttable + Output: disttable."time" + Order: disttable."time" DESC + Startup Exclusion: false + Runtime Exclusion: false + -> Index Only Scan using _dist_hyper_1_4_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_4_chunk + Output: _dist_hyper_1_4_chunk."time" + Index Cond: (_dist_hyper_1_4_chunk."time" IS NOT NULL) + -> Index Only Scan using _dist_hyper_1_1_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_1_chunk + Output: _dist_hyper_1_1_chunk."time" + Index Cond: (_dist_hyper_1_1_chunk."time" IS NOT NULL) + + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2."time" + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit + Output: disttable."time" + -> Custom Scan (ChunkAppend) on public.disttable + Output: disttable."time" + Order: disttable."time" DESC + Startup Exclusion: false + Runtime Exclusion: false + -> Index Only Scan using _dist_hyper_1_5_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_5_chunk + Output: _dist_hyper_1_5_chunk."time" + Index Cond: (_dist_hyper_1_5_chunk."time" IS NOT NULL) + -> Index Only Scan using _dist_hyper_1_3_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_3_chunk + Output: _dist_hyper_1_3_chunk."time" + Index Cond: (_dist_hyper_1_3_chunk."time" IS NOT NULL) + + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3."time" + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_6_chunk, _dist_hyper_1_2_chunk + Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit + Output: disttable."time" + -> Custom Scan (ChunkAppend) on public.disttable + Output: disttable."time" + Order: disttable."time" DESC + Startup Exclusion: false + Runtime Exclusion: false + -> Index Only Scan using _dist_hyper_1_6_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_6_chunk + Output: _dist_hyper_1_6_chunk."time" + Index Cond: (_dist_hyper_1_6_chunk."time" IS NOT NULL) + -> Index Only Scan using _dist_hyper_1_2_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_2_chunk + Output: _dist_hyper_1_2_chunk."time" + Index Cond: (_dist_hyper_1_2_chunk."time" IS NOT NULL) + +(69 rows) + +EXPLAIN 
(VERBOSE, COSTS FALSE) +SELECT max(temp) +FROM disttable; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result + Output: $0 + InitPlan 1 (returns $0) + -> Limit + Output: disttable.temp + -> Custom Scan (AsyncAppend) + Output: disttable.temp + -> Merge Append + Sort Key: disttable_1.temp DESC + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 + Output: disttable_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit + Output: _dist_hyper_1_1_chunk.temp + -> Sort + Output: _dist_hyper_1_1_chunk.temp + Sort Key: _dist_hyper_1_1_chunk.temp DESC + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_1_1_chunk + Output: _dist_hyper_1_1_chunk.temp + Filter: (_dist_hyper_1_1_chunk.temp IS NOT NULL) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_4_chunk + Output: _dist_hyper_1_4_chunk.temp + Filter: (_dist_hyper_1_4_chunk.temp IS NOT NULL) + + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 + Output: disttable_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit + Output: _dist_hyper_1_3_chunk.temp + -> Sort + Output: _dist_hyper_1_3_chunk.temp + Sort Key: _dist_hyper_1_3_chunk.temp DESC + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_1_3_chunk + Output: _dist_hyper_1_3_chunk.temp + Filter: (_dist_hyper_1_3_chunk.temp IS NOT NULL) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_5_chunk + Output: _dist_hyper_1_5_chunk.temp + Filter: (_dist_hyper_1_5_chunk.temp IS NOT NULL) + + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 + Output: disttable_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit + Output: _dist_hyper_1_2_chunk.temp + -> Sort + Output: _dist_hyper_1_2_chunk.temp + Sort Key: _dist_hyper_1_2_chunk.temp DESC + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_1_2_chunk + Output: _dist_hyper_1_2_chunk.temp + Filter: (_dist_hyper_1_2_chunk.temp IS NOT NULL) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_6_chunk + Output: _dist_hyper_1_6_chunk.temp + Filter: (_dist_hyper_1_6_chunk.temp IS NOT NULL) + +(66 rows) + +-- Don't remote explain if there is no VERBOSE flag +EXPLAIN (COSTS FALSE) +SELECT max(temp) +FROM disttable; + QUERY PLAN +------------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Custom Scan (AsyncAppend) + -> Merge Append + Sort Key: disttable_1.temp DESC + -> Custom Scan (DataNodeScan) on disttable disttable_1 + -> Custom Scan (DataNodeScan) on disttable disttable_2 + -> Custom Scan (DataNodeScan) on disttable disttable_3 +(9 rows) + +-- Test additional EXPLAIN flags +EXPLAIN (ANALYZE, 
VERBOSE, COSTS FALSE, BUFFERS OFF, TIMING OFF, SUMMARY OFF) +SELECT max(temp) +FROM disttable; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + Output: $0 + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + Output: disttable.temp + -> Custom Scan (AsyncAppend) (actual rows=1 loops=1) + Output: disttable.temp + -> Merge Append (actual rows=1 loops=1) + Sort Key: disttable_1.temp DESC + -> Custom Scan (DataNodeScan) on public.disttable disttable_1 (actual rows=1 loops=1) + Output: disttable_1.temp + Data node: db_dist_hypertable_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit (actual rows=1 loops=1) + Output: _dist_hyper_1_1_chunk.temp + -> Sort (actual rows=1 loops=1) + Output: _dist_hyper_1_1_chunk.temp + Sort Key: _dist_hyper_1_1_chunk.temp DESC + Sort Method: top-N heapsort + -> Append (actual rows=6 loops=1) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_1_chunk (actual rows=3 loops=1) + Output: _dist_hyper_1_1_chunk.temp + Filter: (_dist_hyper_1_1_chunk.temp IS NOT NULL) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_4_chunk (actual rows=3 loops=1) + Output: _dist_hyper_1_4_chunk.temp + Filter: (_dist_hyper_1_4_chunk.temp IS NOT NULL) + + -> Custom Scan (DataNodeScan) on public.disttable disttable_2 (actual rows=1 loops=1) + Output: disttable_2.temp + Data node: db_dist_hypertable_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit (actual rows=1 loops=1) + Output: _dist_hyper_1_3_chunk.temp + -> Sort (actual rows=1 loops=1) + Output: _dist_hyper_1_3_chunk.temp + Sort Key: _dist_hyper_1_3_chunk.temp DESC + Sort Method: top-N heapsort + -> Append (actual rows=6 loops=1) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_3_chunk (actual rows=3 loops=1) + Output: _dist_hyper_1_3_chunk.temp + Filter: (_dist_hyper_1_3_chunk.temp IS NOT NULL) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_5_chunk (actual rows=3 loops=1) + Output: _dist_hyper_1_5_chunk.temp + Filter: (_dist_hyper_1_5_chunk.temp IS NOT NULL) + + -> Custom Scan (DataNodeScan) on public.disttable disttable_3 (actual rows=1 loops=1) + Output: disttable_3.temp + Data node: db_dist_hypertable_3 + Fetcher Type: COPY + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk + Remote SQL: SELECT temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) AND ((temp IS NOT NULL)) ORDER BY temp DESC NULLS FIRST LIMIT 1 + Remote EXPLAIN: + Limit (actual rows=1 loops=1) + Output: _dist_hyper_1_2_chunk.temp + -> Sort (actual rows=1 loops=1) + Output: _dist_hyper_1_2_chunk.temp + Sort Key: _dist_hyper_1_2_chunk.temp DESC + Sort Method: top-N heapsort + -> Append (actual rows=7 loops=1) + -> Seq Scan on _timescaledb_internal._dist_hyper_1_2_chunk (actual rows=3 loops=1) + Output: _dist_hyper_1_2_chunk.temp + Filter: (_dist_hyper_1_2_chunk.temp IS NOT NULL) + -> Seq Scan on 
_timescaledb_internal._dist_hyper_1_6_chunk (actual rows=4 loops=1) + Output: _dist_hyper_1_6_chunk.temp + Filter: (_dist_hyper_1_6_chunk.temp IS NOT NULL) + +(72 rows) + +-- The constraints, indexes, and triggers on foreign chunks. Only +-- check constraints should recurse to foreign chunks (although they +-- aren't enforced on a foreign table) +SELECT st."Child" as chunk_relid, test.show_constraints((st)."Child") +FROM test.show_subtables('disttable') st; + chunk_relid | show_constraints +---------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + _timescaledb_internal._dist_hyper_1_1_chunk | (constraint_1,c,{time},-,"((""time"" >= 'Wed Dec 28 16:00:00 2016 PST'::timestamp with time zone) AND (""time"" < 'Wed Jan 04 16:00:00 2017 PST'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_1_chunk | (constraint_2,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) < 715827882)",f,f,t) + _timescaledb_internal._dist_hyper_1_1_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (constraint_1,c,{time},-,"((""time"" >= 'Wed Dec 28 16:00:00 2016 PST'::timestamp with time zone) AND (""time"" < 'Wed Jan 04 16:00:00 2017 PST'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (constraint_3,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) >= 1431655764)",f,f,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_3_chunk | (constraint_1,c,{time},-,"((""time"" >= 'Wed Dec 28 16:00:00 2016 PST'::timestamp with time zone) AND (""time"" < 'Wed Jan 04 16:00:00 2017 PST'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_3_chunk | (constraint_4,c,{device},-,"((_timescaledb_functions.get_partition_hash(device) >= 715827882) AND (_timescaledb_functions.get_partition_hash(device) < 1431655764))",f,f,t) + _timescaledb_internal._dist_hyper_1_3_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (constraint_2,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) < 715827882)",f,f,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (constraint_5,c,{time},-,"((""time"" >= 'Wed Jun 27 17:00:00 2018 PDT'::timestamp with time zone) AND (""time"" < 'Wed Jul 04 17:00:00 2018 PDT'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (constraint_4,c,{device},-,"((_timescaledb_functions.get_partition_hash(device) >= 715827882) AND (_timescaledb_functions.get_partition_hash(device) < 1431655764))",f,f,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (constraint_5,c,{time},-,"((""time"" >= 'Wed Jun 27 17:00:00 2018 PDT'::timestamp with time zone) AND (""time"" < 'Wed Jul 04 17:00:00 2018 PDT'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (constraint_3,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) >= 1431655764)",f,f,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (constraint_5,c,{time},-,"((""time"" >= 'Wed Jun 27 17:00:00 2018 
PDT'::timestamp with time zone) AND (""time"" < 'Wed Jul 04 17:00:00 2018 PDT'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) +(18 rows) + +SELECT st."Child" as chunk_relid, test.show_indexes((st)."Child") +FROM test.show_subtables('disttable') st; + chunk_relid | show_indexes +-------------+-------------- +(0 rows) + +SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") +FROM test.show_subtables('disttable') st; + chunk_relid | show_triggers +-------------+--------------- +(0 rows) + +-- Check that the chunks are assigned data nodes +SELECT * FROM _timescaledb_catalog.chunk_data_node ORDER BY 1,2,3; + chunk_id | node_chunk_id | node_name +----------+---------------+---------------------- + 1 | 1 | db_dist_hypertable_1 + 2 | 1 | db_dist_hypertable_3 + 3 | 1 | db_dist_hypertable_2 + 4 | 2 | db_dist_hypertable_1 + 5 | 2 | db_dist_hypertable_2 + 6 | 2 | db_dist_hypertable_3 +(6 rows) + +-- Adding a new trigger should not recurse to foreign chunks +CREATE TRIGGER _1_test_trigger_insert + AFTER INSERT ON disttable + FOR EACH ROW EXECUTE FUNCTION test_trigger(); +SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") +FROM test.show_subtables('disttable') st; + chunk_relid | show_triggers +-------------+--------------- +(0 rows) + +-- Check that we can create indexes on distributed hypertables and +-- that they don't recurse to foreign chunks +CREATE INDEX ON disttable (time, device); +SELECT * FROM test.show_indexes('disttable'); + Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +---------------------------+---------------+------+--------+---------+-----------+------------ + disttable_device_time_idx | {device,time} | | f | f | f | + disttable_pkey | {time,device} | | t | t | f | + disttable_time_device_idx | {time,device} | | f | f | f | + disttable_time_idx | {time} | | f | f | f | +(4 rows) + +SELECT st."Child" as chunk_relid, test.show_indexes((st)."Child") +FROM test.show_subtables('disttable') st; + chunk_relid | show_indexes +-------------+-------------- +(0 rows) + +-- No index mappings should exist either +SELECT * FROM _timescaledb_catalog.chunk_index; + chunk_id | index_name | hypertable_id | hypertable_index_name +----------+------------+---------------+----------------------- +(0 rows) + +-- Check that creating columns work +ALTER TABLE disttable ADD COLUMN "Color" int; +SELECT * FROM test.show_columns('disttable'); + Column | Type | NotNull +--------+--------------------------+--------- + time | timestamp with time zone | t + device | integer | t + temp | double precision | f + Color | integer | f +(4 rows) + +SELECT st."Child" as chunk_relid, test.show_columns((st)."Child") +FROM test.show_subtables('disttable') st; + chunk_relid | show_columns +---------------------------------------------+------------------------------------- + _timescaledb_internal._dist_hyper_1_1_chunk | (time,"timestamp with time zone",t) + _timescaledb_internal._dist_hyper_1_1_chunk | (device,integer,t) + _timescaledb_internal._dist_hyper_1_1_chunk | (temp,"double precision",f) + _timescaledb_internal._dist_hyper_1_1_chunk | (Color,integer,f) + _timescaledb_internal._dist_hyper_1_2_chunk | (time,"timestamp with time zone",t) + _timescaledb_internal._dist_hyper_1_2_chunk | (device,integer,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (temp,"double precision",f) + _timescaledb_internal._dist_hyper_1_2_chunk | (Color,integer,f) + 
_timescaledb_internal._dist_hyper_1_3_chunk | (time,"timestamp with time zone",t) + _timescaledb_internal._dist_hyper_1_3_chunk | (device,integer,t) + _timescaledb_internal._dist_hyper_1_3_chunk | (temp,"double precision",f) + _timescaledb_internal._dist_hyper_1_3_chunk | (Color,integer,f) + _timescaledb_internal._dist_hyper_1_4_chunk | (time,"timestamp with time zone",t) + _timescaledb_internal._dist_hyper_1_4_chunk | (device,integer,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (temp,"double precision",f) + _timescaledb_internal._dist_hyper_1_4_chunk | (Color,integer,f) + _timescaledb_internal._dist_hyper_1_5_chunk | (time,"timestamp with time zone",t) + _timescaledb_internal._dist_hyper_1_5_chunk | (device,integer,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (temp,"double precision",f) + _timescaledb_internal._dist_hyper_1_5_chunk | (Color,integer,f) + _timescaledb_internal._dist_hyper_1_6_chunk | (time,"timestamp with time zone",t) + _timescaledb_internal._dist_hyper_1_6_chunk | (device,integer,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (temp,"double precision",f) + _timescaledb_internal._dist_hyper_1_6_chunk | (Color,integer,f) +(24 rows) + +-- Adding a new unique constraint should not recurse to foreign +-- chunks, but a check constraint should +ALTER TABLE disttable ADD CONSTRAINT disttable_color_unique UNIQUE (time, device, "Color"); +ALTER TABLE disttable ADD CONSTRAINT disttable_temp_non_negative CHECK (temp > 0.0); +SELECT st."Child" as chunk_relid, test.show_constraints((st)."Child") +FROM test.show_subtables('disttable') st; + chunk_relid | show_constraints +---------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + _timescaledb_internal._dist_hyper_1_1_chunk | (constraint_1,c,{time},-,"((""time"" >= 'Wed Dec 28 16:00:00 2016 PST'::timestamp with time zone) AND (""time"" < 'Wed Jan 04 16:00:00 2017 PST'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_1_chunk | (constraint_2,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) < 715827882)",f,f,t) + _timescaledb_internal._dist_hyper_1_1_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_1_chunk | (disttable_temp_non_negative,c,{temp},-,"(temp > (0.0)::double precision)",f,f,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (constraint_1,c,{time},-,"((""time"" >= 'Wed Dec 28 16:00:00 2016 PST'::timestamp with time zone) AND (""time"" < 'Wed Jan 04 16:00:00 2017 PST'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (constraint_3,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) >= 1431655764)",f,f,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_2_chunk | (disttable_temp_non_negative,c,{temp},-,"(temp > (0.0)::double precision)",f,f,t) + _timescaledb_internal._dist_hyper_1_3_chunk | (constraint_1,c,{time},-,"((""time"" >= 'Wed Dec 28 16:00:00 2016 PST'::timestamp with time zone) AND (""time"" < 'Wed Jan 04 16:00:00 2017 PST'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_3_chunk | (constraint_4,c,{device},-,"((_timescaledb_functions.get_partition_hash(device) >= 715827882) AND (_timescaledb_functions.get_partition_hash(device) < 1431655764))",f,f,t) + 
_timescaledb_internal._dist_hyper_1_3_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_3_chunk | (disttable_temp_non_negative,c,{temp},-,"(temp > (0.0)::double precision)",f,f,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (constraint_2,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) < 715827882)",f,f,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (constraint_5,c,{time},-,"((""time"" >= 'Wed Jun 27 17:00:00 2018 PDT'::timestamp with time zone) AND (""time"" < 'Wed Jul 04 17:00:00 2018 PDT'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_4_chunk | (disttable_temp_non_negative,c,{temp},-,"(temp > (0.0)::double precision)",f,f,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (constraint_4,c,{device},-,"((_timescaledb_functions.get_partition_hash(device) >= 715827882) AND (_timescaledb_functions.get_partition_hash(device) < 1431655764))",f,f,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (constraint_5,c,{time},-,"((""time"" >= 'Wed Jun 27 17:00:00 2018 PDT'::timestamp with time zone) AND (""time"" < 'Wed Jul 04 17:00:00 2018 PDT'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_5_chunk | (disttable_temp_non_negative,c,{temp},-,"(temp > (0.0)::double precision)",f,f,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (constraint_3,c,{device},-,"(_timescaledb_functions.get_partition_hash(device) >= 1431655764)",f,f,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (constraint_5,c,{time},-,"((""time"" >= 'Wed Jun 27 17:00:00 2018 PDT'::timestamp with time zone) AND (""time"" < 'Wed Jul 04 17:00:00 2018 PDT'::timestamp with time zone))",f,f,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (disttable_device_check,c,{device},-,"(device > 0)",f,f,t) + _timescaledb_internal._dist_hyper_1_6_chunk | (disttable_temp_non_negative,c,{temp},-,"(temp > (0.0)::double precision)",f,f,t) +(24 rows) + +SELECT cc.* +FROM (SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('disttable')) c, + _timescaledb_catalog.chunk_constraint cc +WHERE c.chunk_id = cc.chunk_id; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-----------------+---------------------------- + 1 | 2 | constraint_2 | + 1 | 1 | constraint_1 | + 2 | 3 | constraint_3 | + 2 | 1 | constraint_1 | + 3 | 4 | constraint_4 | + 3 | 1 | constraint_1 | + 4 | 2 | constraint_2 | + 4 | 5 | constraint_5 | + 5 | 4 | constraint_4 | + 5 | 5 | constraint_5 | + 6 | 3 | constraint_3 | + 6 | 5 | constraint_5 | +(12 rows) + +-- Show contents after re-adding column +SELECT * FROM disttable; + time | device | temp | Color +------------------------------+--------+------+------- + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 | + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 | + Sun Jan 01 06:05:00 2017 PST | 1 | 1.4 | + Mon Jul 02 08:01:00 2018 PDT | 87 | 1.6 | + Mon Jul 02 09:01:00 2018 PDT | 87 | 1.4 | + Mon Jul 02 09:21:00 2018 PDT | 87 | 1.8 | + Mon Jan 02 08:01:00 2017 PST | 2 | 1.3 | + Mon Jan 02 09:01:00 2017 PST | 2 | 1.4 | + Mon Jan 02 08:21:00 2017 PST | 2 | 1.5 | + Sun Jul 01 06:01:00 2018 PDT | 13 | 1.4 | + Sun Jul 01 06:21:00 2018 PDT | 13 | 1.5 | + Sun Jul 01 07:01:00 2018 PDT | 13 | 1.4 | + Sun Jan 01 09:11:00 2017 PST | 3 | 2.1 | + Sun Jan 
01 09:21:00 2017 PST | 3 | 2.2 | + Sun Jan 01 08:11:00 2017 PST | 3 | 2.3 | + Sun Jul 01 09:11:00 2018 PDT | 90 | 2.7 | + Sun Jul 01 08:01:00 2018 PDT | 29 | 1.5 | + Sun Jul 01 09:21:00 2018 PDT | 90 | 2.8 | + Sun Jul 01 08:21:00 2018 PDT | 29 | 1.2 | +(19 rows) + +-- Test INSERTS with RETURNING. Since we previously dropped a column +-- on the hypertable, this also tests that we handle conversion of the +-- attribute numbers in the RETURNING clause, since they now differ +-- between the hypertable root relation and the chunk currently +-- RETURNING from. +INSERT INTO disttable (time, device, "Color", temp) +VALUES ('2017-09-02 06:09', 4, 1, 9.8) +RETURNING time, "Color", temp; + time | Color | temp +------------------------------+-------+------ + Sat Sep 02 06:09:00 2017 PDT | 1 | 9.8 +(1 row) + +INSERT INTO disttable (time, device, "Color", temp) +VALUES ('2017-09-03 06:18', 9, 3, 8.7) +RETURNING 1; + ?column? +---------- + 1 +(1 row) + +-- On conflict +INSERT INTO disttable (time, device, "Color", temp) +VALUES ('2017-09-02 06:09', 6, 2, 10.5) +ON CONFLICT DO NOTHING; +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable'); +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+---------------------+-------+------------------------------------------------------------------------------------------- + 1| 1|_timescaledb_internal|_dist_hyper_1_1_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [-9223372036854775808, 715827882]} + 2| 1|_timescaledb_internal|_dist_hyper_1_4_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 3| 1|_timescaledb_internal|_dist_hyper_1_9_chunk|r |{"time": [1504137600000000, 1504742400000000], "device": [-9223372036854775808, 715827882]} +(3 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+---------------------+-------+--------------------------------------------------------------------------------- + 1| 1|_timescaledb_internal|_dist_hyper_1_3_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [715827882, 1431655764]} + 2| 1|_timescaledb_internal|_dist_hyper_1_5_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} + 3| 1|_timescaledb_internal|_dist_hyper_1_7_chunk|r |{"time": [1504137600000000, 1504742400000000], "device": [715827882, 1431655764]} +(3 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+---------------------+-------+------------------------------------------------------------------------------------------- + 1| 1|_timescaledb_internal|_dist_hyper_1_2_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [1431655764, 9223372036854775807]} + 2| 1|_timescaledb_internal|_dist_hyper_1_6_chunk|r |{"time": [1530144000000000, 1530748800000000], 
"device": [1431655764, 9223372036854775807]} + 3| 1|_timescaledb_internal|_dist_hyper_1_8_chunk|r |{"time": [1504137600000000, 1504742400000000], "device": [1431655764, 9223372036854775807]} +(3 rows) + + + remote_exec +------------- + +(1 row) + +-- Show new row and that conflicting row is not inserted +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM disttable; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_1]: +time |device|temp|Color +----------------------------+------+----+----- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1| +Sun Jan 01 08:01:00 2017 PST| 1| 1.2| +Sun Jan 01 06:05:00 2017 PST| 1| 1.4| +Mon Jul 02 08:01:00 2018 PDT| 87| 1.6| +Mon Jul 02 09:01:00 2018 PDT| 87| 1.4| +Mon Jul 02 09:21:00 2018 PDT| 87| 1.8| +Sat Sep 02 06:09:00 2017 PDT| 6|10.5| 2 +(7 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_2]: +time |device|temp|Color +----------------------------+------+----+----- +Mon Jan 02 08:01:00 2017 PST| 2| 1.3| +Mon Jan 02 09:01:00 2017 PST| 2| 1.4| +Mon Jan 02 08:21:00 2017 PST| 2| 1.5| +Sun Jul 01 06:01:00 2018 PDT| 13| 1.4| +Sun Jul 01 06:21:00 2018 PDT| 13| 1.5| +Sun Jul 01 07:01:00 2018 PDT| 13| 1.4| +Sat Sep 02 06:09:00 2017 PDT| 4| 9.8| 1 +(7 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_3]: +time |device|temp|Color +----------------------------+------+----+----- +Sun Jan 01 09:11:00 2017 PST| 3| 2.1| +Sun Jan 01 09:21:00 2017 PST| 3| 2.2| +Sun Jan 01 08:11:00 2017 PST| 3| 2.3| +Sun Jul 01 09:11:00 2018 PDT| 90| 2.7| +Sun Jul 01 08:01:00 2018 PDT| 29| 1.5| +Sun Jul 01 09:21:00 2018 PDT| 90| 2.8| +Sun Jul 01 08:21:00 2018 PDT| 29| 1.2| +Sun Sep 03 06:18:00 2017 PDT| 9| 8.7| 3 +(8 rows) + + + remote_exec +------------- + +(1 row) + +\set ON_ERROR_STOP 0 +-- ON CONFLICT DO NOTHING only works when index inference is omitted +\set VERBOSITY default +INSERT INTO disttable +VALUES ('2017-09-02 06:09', 6) +ON CONFLICT(time,device) DO NOTHING; +ERROR: could not find arbiter index for hypertable index "disttable_pkey" on chunk "_dist_hyper_1_9_chunk" +HINT: Omit the index inference specification for the distributed hypertable in the ON CONFLICT clause. +INSERT INTO disttable +VALUES ('2017-09-02 06:09', 6) +ON CONFLICT(time,device,"Color") DO NOTHING; +ERROR: could not find arbiter index for hypertable index "disttable_color_unique" on chunk "_dist_hyper_1_9_chunk" +HINT: Omit the index inference specification for the distributed hypertable in the ON CONFLICT clause. +INSERT INTO disttable +VALUES ('2017-09-02 06:09', 6) +ON CONFLICT ON CONSTRAINT disttable_color_unique DO NOTHING; +ERROR: could not find arbiter index for hypertable index "disttable_color_unique" on chunk "_dist_hyper_1_9_chunk" +HINT: Omit the index inference specification for the distributed hypertable in the ON CONFLICT clause. 
+\set VERBOSITY terse +SELECT * FROM disttable ORDER BY disttable; + time | device | temp | Color +------------------------------+--------+------+------- + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 | + Sun Jan 01 06:05:00 2017 PST | 1 | 1.4 | + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 | + Sun Jan 01 08:11:00 2017 PST | 3 | 2.3 | + Sun Jan 01 09:11:00 2017 PST | 3 | 2.1 | + Sun Jan 01 09:21:00 2017 PST | 3 | 2.2 | + Mon Jan 02 08:01:00 2017 PST | 2 | 1.3 | + Mon Jan 02 08:21:00 2017 PST | 2 | 1.5 | + Mon Jan 02 09:01:00 2017 PST | 2 | 1.4 | + Sat Sep 02 06:09:00 2017 PDT | 4 | 9.8 | 1 + Sat Sep 02 06:09:00 2017 PDT | 6 | 10.5 | 2 + Sun Sep 03 06:18:00 2017 PDT | 9 | 8.7 | 3 + Sun Jul 01 06:01:00 2018 PDT | 13 | 1.4 | + Sun Jul 01 06:21:00 2018 PDT | 13 | 1.5 | + Sun Jul 01 07:01:00 2018 PDT | 13 | 1.4 | + Sun Jul 01 08:01:00 2018 PDT | 29 | 1.5 | + Sun Jul 01 08:21:00 2018 PDT | 29 | 1.2 | + Sun Jul 01 09:11:00 2018 PDT | 90 | 2.7 | + Sun Jul 01 09:21:00 2018 PDT | 90 | 2.8 | + Mon Jul 02 08:01:00 2018 PDT | 87 | 1.6 | + Mon Jul 02 09:01:00 2018 PDT | 87 | 1.4 | + Mon Jul 02 09:21:00 2018 PDT | 87 | 1.8 | +(22 rows) + +-- ON CONFLICT only works with DO NOTHING for now +INSERT INTO disttable (time, device, "Color", temp) +VALUES ('2017-09-09 08:13', 7, 3, 27.5) +ON CONFLICT (time) DO UPDATE SET temp = 3.2; +ERROR: ON CONFLICT DO UPDATE not supported on distributed hypertables +-- Test that an INSERT that would create a chunk does not work on a +-- data node +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ + INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) +$$); +NOTICE: [db_dist_hypertable_1]: + INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) + +ERROR: [db_dist_hypertable_1]: distributed hypertable member cannot create chunk on its own +\set ON_ERROR_STOP 1 +-- However, INSERTs on a data node that does not create a chunk works. 
+SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ + INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) +$$); +NOTICE: [db_dist_hypertable_1]: + INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) + + remote_exec +------------- + +(1 row) + +-- Test updates +UPDATE disttable SET "Color" = 4 WHERE "Color" = 3; +SELECT * FROM disttable; + time | device | temp | Color +------------------------------+--------+------+------- + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 | + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 | + Sun Jan 01 06:05:00 2017 PST | 1 | 1.4 | + Mon Jul 02 08:01:00 2018 PDT | 87 | 1.6 | + Mon Jul 02 09:01:00 2018 PDT | 87 | 1.4 | + Mon Jul 02 09:21:00 2018 PDT | 87 | 1.8 | + Sat Sep 02 06:09:00 2017 PDT | 6 | 10.5 | 2 + Sun Sep 03 06:09:00 2017 PDT | 1 | 2 | 9 + Mon Jan 02 08:01:00 2017 PST | 2 | 1.3 | + Mon Jan 02 09:01:00 2017 PST | 2 | 1.4 | + Mon Jan 02 08:21:00 2017 PST | 2 | 1.5 | + Sun Jul 01 06:01:00 2018 PDT | 13 | 1.4 | + Sun Jul 01 06:21:00 2018 PDT | 13 | 1.5 | + Sun Jul 01 07:01:00 2018 PDT | 13 | 1.4 | + Sat Sep 02 06:09:00 2017 PDT | 4 | 9.8 | 1 + Sun Jan 01 09:11:00 2017 PST | 3 | 2.1 | + Sun Jan 01 09:21:00 2017 PST | 3 | 2.2 | + Sun Jan 01 08:11:00 2017 PST | 3 | 2.3 | + Sun Jul 01 09:11:00 2018 PDT | 90 | 2.7 | + Sun Jul 01 08:01:00 2018 PDT | 29 | 1.5 | + Sun Jul 01 09:21:00 2018 PDT | 90 | 2.8 | + Sun Jul 01 08:21:00 2018 PDT | 29 | 1.2 | + Sun Sep 03 06:18:00 2017 PDT | 9 | 8.7 | 4 +(23 rows) + +WITH devices AS ( + SELECT DISTINCT device FROM disttable ORDER BY device +) +UPDATE disttable SET "Color" = 2 WHERE device = (SELECT device FROM devices LIMIT 1); +\set ON_ERROR_STOP 0 +-- Updates referencing non-existing column +UPDATE disttable SET device = 4 WHERE no_such_column = 2; +ERROR: column "no_such_column" does not exist at character 39 +UPDATE disttable SET no_such_column = 4 WHERE device = 2; +ERROR: column "no_such_column" of relation "disttable" does not exist at character 22 +-- Update to system column +UPDATE disttable SET tableoid = 4 WHERE device = 2; +ERROR: cannot assign to system column "tableoid" at character 22 +\set ON_ERROR_STOP 1 +-- Test deletes (no rows deleted) +DELETE FROM disttable WHERE device = 3 +RETURNING *; + time | device | temp | Color +------------------------------+--------+------+------- + Sun Jan 01 09:11:00 2017 PST | 3 | 2.1 | + Sun Jan 01 09:21:00 2017 PST | 3 | 2.2 | + Sun Jan 01 08:11:00 2017 PST | 3 | 2.3 | +(3 rows) + +DELETE FROM disttable WHERE time IS NULL; +-- Test deletes (rows deleted) +DELETE FROM disttable WHERE device = 4 +RETURNING *; + time | device | temp | Color +------------------------------+--------+------+------- + Sat Sep 02 06:09:00 2017 PDT | 4 | 9.8 | 1 +(1 row) + +-- Query to show that rows are deleted +SELECT * FROM disttable; + time | device | temp | Color +------------------------------+--------+------+------- + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 | 2 + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 | 2 + Sun Jan 01 06:05:00 2017 PST | 1 | 1.4 | 2 + Mon Jul 02 08:01:00 2018 PDT | 87 | 1.6 | + Mon Jul 02 09:01:00 2018 PDT | 87 | 1.4 | + Mon Jul 02 09:21:00 2018 PDT | 87 | 1.8 | + Sat Sep 02 06:09:00 2017 PDT | 6 | 10.5 | 2 + Sun Sep 03 06:09:00 2017 PDT | 1 | 2 | 2 + Mon Jan 02 08:01:00 2017 PST | 2 | 1.3 | + Mon Jan 02 09:01:00 2017 PST | 2 | 1.4 | + Mon Jan 02 08:21:00 2017 PST | 2 | 1.5 | + Sun Jul 01 06:01:00 2018 PDT | 13 | 1.4 | + Sun Jul 01 06:21:00 2018 PDT | 13 | 1.5 | + Sun Jul 01 07:01:00 2018 PDT | 13 | 1.4 | + Sun Jul 01 09:11:00 2018 PDT | 90 | 2.7 | + Sun Jul 01 
08:01:00 2018 PDT | 29 | 1.5 | + Sun Jul 01 09:21:00 2018 PDT | 90 | 2.8 | + Sun Jul 01 08:21:00 2018 PDT | 29 | 1.2 | + Sun Sep 03 06:18:00 2017 PDT | 9 | 8.7 | 4 +(19 rows) + +-- Ensure rows are deleted on the data nodes +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM disttable; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_1]: +time |device|temp|Color +----------------------------+------+----+----- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1| 2 +Sun Jan 01 08:01:00 2017 PST| 1| 1.2| 2 +Sun Jan 01 06:05:00 2017 PST| 1| 1.4| 2 +Mon Jul 02 08:01:00 2018 PDT| 87| 1.6| +Mon Jul 02 09:01:00 2018 PDT| 87| 1.4| +Mon Jul 02 09:21:00 2018 PDT| 87| 1.8| +Sat Sep 02 06:09:00 2017 PDT| 6|10.5| 2 +Sun Sep 03 06:09:00 2017 PDT| 1| 2| 2 +(8 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_2]: +time |device|temp|Color +----------------------------+------+----+----- +Mon Jan 02 08:01:00 2017 PST| 2| 1.3| +Mon Jan 02 09:01:00 2017 PST| 2| 1.4| +Mon Jan 02 08:21:00 2017 PST| 2| 1.5| +Sun Jul 01 06:01:00 2018 PDT| 13| 1.4| +Sun Jul 01 06:21:00 2018 PDT| 13| 1.5| +Sun Jul 01 07:01:00 2018 PDT| 13| 1.4| +(6 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_3]: +time |device|temp|Color +----------------------------+------+----+----- +Sun Jul 01 09:11:00 2018 PDT| 90| 2.7| +Sun Jul 01 08:01:00 2018 PDT| 29| 1.5| +Sun Jul 01 09:21:00 2018 PDT| 90| 2.8| +Sun Jul 01 08:21:00 2018 PDT| 29| 1.2| +Sun Sep 03 06:18:00 2017 PDT| 9| 8.7| 4 +(5 rows) + + + remote_exec +------------- + +(1 row) + +-- Test TRUNCATE +TRUNCATE disttable; +-- No data should remain +SELECT * FROM disttable; + time | device | temp | Color +------+--------+------+------- +(0 rows) + +-- Metadata and tables cleaned up +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) + +SELECT * FROM show_chunks('disttable'); + show_chunks +------------- +(0 rows) + +-- Also cleaned up remotely +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; +SELECT * FROM show_chunks('disttable'); +SELECT * FROM disttable; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk +NOTICE: [db_dist_hypertable_1]: +id|hypertable_id|schema_name|table_name|compressed_chunk_id|dropped|status|osm_chunk +--+-------------+-----------+----------+-------------------+-------+------+--------- +(0 rows) + + +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_1]: +show_chunks +----------- +(0 rows) + + +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_1]: +time|device|temp|Color +----+------+----+----- +(0 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk +NOTICE: [db_dist_hypertable_2]: 
+id|hypertable_id|schema_name|table_name|compressed_chunk_id|dropped|status|osm_chunk +--+-------------+-----------+----------+-------------------+-------+------+--------- +(0 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_2]: +show_chunks +----------- +(0 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_2]: +time|device|temp|Color +----+------+----+----- +(0 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk +NOTICE: [db_dist_hypertable_3]: +id|hypertable_id|schema_name|table_name|compressed_chunk_id|dropped|status|osm_chunk +--+-------------+-----------+----------+-------------------+-------+------+--------- +(0 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM show_chunks('disttable') +NOTICE: [db_dist_hypertable_3]: +show_chunks +----------- +(0 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM disttable +NOTICE: [db_dist_hypertable_3]: +time|device|temp|Color +----+------+----+----- +(0 rows) + + + remote_exec +------------- + +(1 row) + +-- The hypertable view also shows no chunks and no data +SELECT * FROM timescaledb_information.hypertables +ORDER BY hypertable_schema, hypertable_name; + hypertable_schema | hypertable_name | owner | num_dimensions | num_chunks | compression_enabled | is_distributed | replication_factor | data_nodes | tablespaces +-------------------+-----------------+-------------+----------------+------------+---------------------+----------------+--------------------+------------------------------------------------------------------+------------- + public | disttable | test_role_1 | 2 | 0 | f | t | 1 | {db_dist_hypertable_1,db_dist_hypertable_2,db_dist_hypertable_3} | + public | underreplicated | test_role_1 | 1 | 0 | f | t | 4 | {db_dist_hypertable_1,db_dist_hypertable_2,db_dist_hypertable_3} | +(2 rows) + +-- Test underreplicated chunk warning +INSERT INTO underreplicated VALUES ('2017-01-01 06:01', 1, 1.1), + ('2017-01-02 07:01', 2, 3.5); +WARNING: insufficient number of data nodes +SELECT * FROM _timescaledb_catalog.chunk_data_node ORDER BY 1,2,3; + chunk_id | node_chunk_id | node_name +----------+---------------+---------------------- + 10 | 4 | db_dist_hypertable_1 + 10 | 4 | db_dist_hypertable_2 + 10 | 4 | db_dist_hypertable_3 +(3 rows) + +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('underreplicated'); + chunk_id | hypertable_id | schema_name | table_name | relkind | slices +----------+---------------+-----------------------+------------------------+---------+------------------------------------------------ + 10 | 2 | _timescaledb_internal | _dist_hyper_2_10_chunk | f | {"time": [1482969600000000, 1483574400000000]} +(1 row) + +-- Show chunk data node mappings +SELECT * FROM _timescaledb_catalog.chunk_data_node ORDER BY 1,2,3; + chunk_id | node_chunk_id | node_name +----------+---------------+---------------------- + 10 | 4 | db_dist_hypertable_1 + 10 | 4 | db_dist_hypertable_2 + 10 | 4 | db_dist_hypertable_3 +(3 rows) + +-- Show that chunks are created on remote data nodes and that all +-- data nodes/chunks have the same data due to replication +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('underreplicated'); +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT 
(_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('underreplicated') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+----------------------+-------+---------------------------------------------- + 4| 2|_timescaledb_internal|_dist_hyper_2_10_chunk|r |{"time": [1482969600000000, 1483574400000000]} +(1 row) + + +NOTICE: [db_dist_hypertable_2]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('underreplicated') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+----------------------+-------+---------------------------------------------- + 4| 2|_timescaledb_internal|_dist_hyper_2_10_chunk|r |{"time": [1482969600000000, 1483574400000000]} +(1 row) + + +NOTICE: [db_dist_hypertable_3]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('underreplicated') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+----------------------+-------+---------------------------------------------- + 4| 2|_timescaledb_internal|_dist_hyper_2_10_chunk|r |{"time": [1482969600000000, 1483574400000000]} +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM underreplicated; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_1]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +Mon Jan 02 07:01:00 2017 PST| 2| 3.5 +(2 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_2]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +Mon Jan 02 07:01:00 2017 PST| 2| 3.5 +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_3]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +Mon Jan 02 07:01:00 2017 PST| 2| 3.5 +(2 rows) + + + remote_exec +------------- + +(1 row) + +-- Test updates +UPDATE underreplicated SET temp = 2.0 WHERE device = 2 +RETURNING time, temp, device; + time | temp | device +------------------------------+------+-------- + Mon Jan 02 07:01:00 2017 PST | 2 | 2 +(1 row) + +SELECT * FROM underreplicated; + time | device | temp +------------------------------+--------+------ + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 + Mon Jan 02 07:01:00 2017 PST | 2 | 2 +(2 rows) + +-- Show that all replica chunks are updated +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM underreplicated; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_1]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +Mon Jan 02 07:01:00 2017 PST| 2| 2 +(2 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_2]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +Mon Jan 02 07:01:00 2017 PST| 2| 2 +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_3]: +time |device|temp +----------------------------+------+---- 
+Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +Mon Jan 02 07:01:00 2017 PST| 2| 2 +(2 rows) + + + remote_exec +------------- + +(1 row) + +DELETE FROM underreplicated WHERE device = 2 +RETURNING *; + time | device | temp +------------------------------+--------+------ + Mon Jan 02 07:01:00 2017 PST | 2 | 2 +(1 row) + +-- Ensure deletes across all data nodes +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM underreplicated; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_1]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +(1 row) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_2]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +(1 row) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_3]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Test hypertable creation fails on distributed error +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_3'], $$ +CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float); +SELECT * FROM underreplicated; +$$); +NOTICE: [db_dist_hypertable_3]: +CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float) +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM underreplicated +NOTICE: [db_dist_hypertable_3]: +time |device|temp +----------------------------+------+---- +Sun Jan 01 06:01:00 2017 PST| 1| 1.1 +(1 row) + + + remote_exec +------------- + +(1 row) + +\set ON_ERROR_STOP 0 +CREATE TABLE remotetable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float); +SELECT * FROM create_hypertable('remotetable', 'time', replication_factor => 1); +ERROR: [db_dist_hypertable_3]: relation "remotetable" already exists +-- Test distributed_hypertable creation fails with replication factor 0 +CREATE TABLE remotetable2(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float); +SELECT * FROM create_distributed_hypertable('remotetable2', 'time', replication_factor => 0); +ERROR: invalid replication factor +\set ON_ERROR_STOP 1 +SELECT * FROM timescaledb_information.hypertables +ORDER BY hypertable_schema, hypertable_name; + hypertable_schema | hypertable_name | owner | num_dimensions | num_chunks | compression_enabled | is_distributed | replication_factor | data_nodes | tablespaces +-------------------+-----------------+-------------+----------------+------------+---------------------+----------------+--------------------+------------------------------------------------------------------+------------- + public | disttable | test_role_1 | 2 | 0 | f | t | 1 | {db_dist_hypertable_1,db_dist_hypertable_2,db_dist_hypertable_3} | + public | underreplicated | test_role_1 | 1 | 1 | f | t | 4 | {db_dist_hypertable_1,db_dist_hypertable_2,db_dist_hypertable_3} | +(2 rows) + +-- Test distributed hypertable creation with many parameters +\c :TEST_DBNAME :ROLE_SUPERUSER +--Ensure INSERTs use DataNodeDispatch. 
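+-- A minimal sketch (commented out, not captured output) of how the insert
+-- path can be checked once the setting below is applied: an EXPLAIN of a
+-- distributed INSERT, e.g.
+--   EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO disttable VALUES ('2017-01-01 06:01', 1, 1.1);
+-- should show a DataNodeDispatch node instead of routing rows through the
+-- remote COPY protocol.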
+SET timescaledb.enable_distributed_insert_with_copy=false; +CREATE SCHEMA "T3sTSch"; +CREATE SCHEMA "Table\\Schema"; +CREATE SCHEMA "single'schema"; +GRANT ALL ON SCHEMA "T3sTSch" TO :ROLE_1; +GRANT ALL ON SCHEMA "Table\\Schema" TO :ROLE_1; +GRANT ALL ON SCHEMA "single'schema" TO :ROLE_1; +SET ROLE :ROLE_1; +CREATE TABLE "Table\\Schema"."Param_Table"("time Col %#^#@$#" timestamptz, __region text, reading float); +SELECT * FROM create_distributed_hypertable('"Table\\Schema"."Param_Table"', 'time Col %#^#@$#', partitioning_column => '__region', +associated_schema_name => 'T3sTSch', associated_table_prefix => 'test*pre_', chunk_time_interval => interval '1 week', +create_default_indexes => FALSE, if_not_exists => TRUE, replication_factor => 2, +data_nodes => ARRAY[:'DATA_NODE_2', :'DATA_NODE_3']); +NOTICE: adding not-null constraint to column "time Col %#^#@$#" + hypertable_id | schema_name | table_name | created +---------------+---------------+-------------+--------- + 4 | Table\\Schema | Param_Table | t +(1 row) + +-- Test detach and attach data node +SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, drop_remote_data => true); +WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" +NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 1 + detach_data_node +------------------ + 1 +(1 row) + +-- Test attach_data_node. First show dimensions and currently attached +-- servers. The number of slices in the space dimension should equal +-- the number of servers since we didn't explicitly specify +-- number_partitions +SELECT h.table_name, d.column_name, d.num_slices +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id +AND h.table_name = 'Param_Table' +ORDER BY 1, 2, 3; + table_name | column_name | num_slices +-------------+------------------+------------ + Param_Table | __region | 1 + Param_Table | time Col %#^#@$# | +(2 rows) + +SELECT h.table_name, hdn.node_name +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.hypertable_data_node hdn +WHERE h.id = hdn.hypertable_id +AND h.table_name = 'Param_Table' +ORDER BY 1, 2; + table_name | node_name +-------------+---------------------- + Param_Table | db_dist_hypertable_3 +(1 row) + +SELECT * FROM attach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"'); +NOTICE: the number of partitions in dimension "__region" was increased to 2 + hypertable_id | node_hypertable_id | node_name +---------------+--------------------+---------------------- + 4 | 3 | db_dist_hypertable_1 +(1 row) + +-- Show updated metadata after attach +SELECT h.table_name, d.column_name, d.num_slices +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id +AND h.table_name = 'Param_Table' +ORDER BY 1, 2, 3; + table_name | column_name | num_slices +-------------+------------------+------------ + Param_Table | __region | 2 + Param_Table | time Col %#^#@$# | +(2 rows) + +SELECT h.table_name, hdn.node_name +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.hypertable_data_node hdn +WHERE h.id = hdn.hypertable_id +AND h.table_name = 'Param_Table' +ORDER BY 1, 2; + table_name | node_name +-------------+---------------------- + Param_Table | db_dist_hypertable_1 + Param_Table | db_dist_hypertable_3 +(2 rows) + +-- Attach another data node but do not auto-repartition, i.e., +-- increase the number of slices. 
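+-- For comparison (sketch only, not executed): with the default
+-- repartition => true, e.g.
+--   SELECT * FROM attach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"');
+-- the "__region" dimension would be repartitioned to match the new number of
+-- attached data nodes, as happened for :'DATA_NODE_1' above.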
+SELECT * FROM attach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', repartition => false); +WARNING: insufficient number of partitions for dimension "__region" + hypertable_id | node_hypertable_id | node_name +---------------+--------------------+---------------------- + 4 | 4 | db_dist_hypertable_2 +(1 row) + +-- Number of slices should not be increased +SELECT h.table_name, d.column_name, d.num_slices +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id +AND h.table_name = 'Param_Table' +ORDER BY 1, 2, 3; + table_name | column_name | num_slices +-------------+------------------+------------ + Param_Table | __region | 2 + Param_Table | time Col %#^#@$# | +(2 rows) + +-- Manually increase the number of partitions +SELECT * FROM set_number_partitions('"Table\\Schema"."Param_Table"', 4); + set_number_partitions +----------------------- + +(1 row) + +-- Verify hypertables on all data nodes +SELECT * FROM _timescaledb_catalog.hypertable; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+---------------+-----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | disttable | _timescaledb_internal | _dist_hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 1 | 0 + 2 | public | underreplicated | _timescaledb_internal | _dist_hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 4 | 0 + 4 | Table\\Schema | Param_Table | T3sTSch | test*pre_ | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 2 | 0 +(3 rows) + +SELECT * FROM _timescaledb_catalog.dimension; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+------------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | + 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | | + 2 | 1 | device | integer | f | 3 | _timescaledb_functions | get_partition_hash | | | | + 5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | | + 6 | 4 | __region | text | f | 4 | _timescaledb_functions | get_partition_hash | | | | +(5 rows) + +SELECT * FROM test.show_triggers('"Table\\Schema"."Param_Table"'); + Trigger | Type | Function +-------------------+------+--------------------------------------- + ts_insert_blocker | 7 | _timescaledb_functions.insert_blocker +(1 row) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM _timescaledb_catalog.hypertable; +SELECT * FROM _timescaledb_catalog.dimension; +SELECT t.tgname, t.tgtype, t.tgfoid::regproc +FROM pg_trigger t, pg_class c WHERE c.relname = 'Param_Table' AND t.tgrelid = c.oid; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM _timescaledb_catalog.hypertable +NOTICE: [db_dist_hypertable_1]: +id|schema_name |table_name 
|associated_schema_name|associated_table_prefix|num_dimensions|chunk_sizing_func_schema|chunk_sizing_func_name |chunk_target_size|compression_state|compressed_hypertable_id|replication_factor|status +--+-------------+---------------+----------------------+-----------------------+--------------+------------------------+------------------------+-----------------+-----------------+------------------------+------------------+------ + 1|public |disttable |_timescaledb_internal |_dist_hyper_1 | 2|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 + 2|public |underreplicated|_timescaledb_internal |_dist_hyper_2 | 1|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 + 3|Table\\Schema|Param_Table |T3sTSch |test*pre_ | 2|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 +(3 rows) + + +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM _timescaledb_catalog.dimension +NOTICE: [db_dist_hypertable_1]: +id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- + 1| 1|time |timestamp with time zone|t | | | | 604800000000| | | + 3| 2|time |timestamp with time zone|t | | | | 604800000000| | | + 2| 1|device |integer |f | 3|_timescaledb_functions |get_partition_hash| | | | + 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | | + 5| 3|__region |text |f | 4|_timescaledb_functions |get_partition_hash| | | | +(5 rows) + + +NOTICE: [db_dist_hypertable_1]: +SELECT t.tgname, t.tgtype, t.tgfoid::regproc +FROM pg_trigger t, pg_class c WHERE c.relname = 'Param_Table' AND t.tgrelid = c.oid +NOTICE: [db_dist_hypertable_1]: +tgname |tgtype|tgfoid +-----------------+------+------------------------------------- +ts_insert_blocker| 7|_timescaledb_functions.insert_blocker +(1 row) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM _timescaledb_catalog.hypertable +NOTICE: [db_dist_hypertable_2]: +id|schema_name |table_name |associated_schema_name|associated_table_prefix|num_dimensions|chunk_sizing_func_schema|chunk_sizing_func_name |chunk_target_size|compression_state|compressed_hypertable_id|replication_factor|status +--+-------------+---------------+----------------------+-----------------------+--------------+------------------------+------------------------+-----------------+-----------------+------------------------+------------------+------ + 1|public |disttable |_timescaledb_internal |_dist_hyper_1 | 2|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 + 2|public |underreplicated|_timescaledb_internal |_dist_hyper_2 | 1|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 + 4|Table\\Schema|Param_Table |T3sTSch |test*pre_ | 2|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 +(3 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM _timescaledb_catalog.dimension +NOTICE: [db_dist_hypertable_2]: +id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- + 1| 1|time 
|timestamp with time zone|t | | | | 604800000000| | | + 3| 2|time |timestamp with time zone|t | | | | 604800000000| | | + 2| 1|device |integer |f | 3|_timescaledb_functions |get_partition_hash| | | | + 6| 4|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | | + 7| 4|__region |text |f | 4|_timescaledb_functions |get_partition_hash| | | | +(5 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT t.tgname, t.tgtype, t.tgfoid::regproc +FROM pg_trigger t, pg_class c WHERE c.relname = 'Param_Table' AND t.tgrelid = c.oid +NOTICE: [db_dist_hypertable_2]: +tgname |tgtype|tgfoid +-----------------+------+------------------------------------- +ts_insert_blocker| 7|_timescaledb_functions.insert_blocker +(1 row) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM _timescaledb_catalog.hypertable +NOTICE: [db_dist_hypertable_3]: +id|schema_name |table_name |associated_schema_name|associated_table_prefix|num_dimensions|chunk_sizing_func_schema|chunk_sizing_func_name |chunk_target_size|compression_state|compressed_hypertable_id|replication_factor|status +--+-------------+---------------+----------------------+-----------------------+--------------+------------------------+------------------------+-----------------+-----------------+------------------------+------------------+------ + 1|public |disttable |_timescaledb_internal |_dist_hyper_1 | 2|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 + 2|public |underreplicated|_timescaledb_internal |_dist_hyper_2 | 1|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 + 3|Table\\Schema|Param_Table |T3sTSch |test*pre_ | 2|_timescaledb_functions |calculate_chunk_interval| 0| 0| | -1| 0 +(3 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM _timescaledb_catalog.dimension +NOTICE: [db_dist_hypertable_3]: +id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- + 1| 1|time |timestamp with time zone|t | | | | 604800000000| | | + 3| 2|time |timestamp with time zone|t | | | | 604800000000| | | + 2| 1|device |integer |f | 3|_timescaledb_functions |get_partition_hash| | | | + 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | | + 5| 3|__region |text |f | 4|_timescaledb_functions |get_partition_hash| | | | +(5 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT t.tgname, t.tgtype, t.tgfoid::regproc +FROM pg_trigger t, pg_class c WHERE c.relname = 'Param_Table' AND t.tgrelid = c.oid +NOTICE: [db_dist_hypertable_3]: +tgname |tgtype|tgfoid +-----------------+------+------------------------------------- +ts_insert_blocker| 7|_timescaledb_functions.insert_blocker +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Verify that repartitioning works as expected on detach_data_node +SELECT * FROM detach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"', repartition => true); +NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 2 + detach_data_node +------------------ + 1 +(1 row) + +SELECT h.table_name, d.column_name, d.num_slices +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id +AND h.table_name = 'Param_Table'; + table_name | column_name | num_slices 
+-------------+------------------+------------ + Param_Table | __region | 2 + Param_Table | time Col %#^#@$# | +(2 rows) + +SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, repartition => false); +WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" + detach_data_node +------------------ + 1 +(1 row) + +SELECT h.table_name, d.column_name, d.num_slices +FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id +AND h.table_name = 'Param_Table'; + table_name | column_name | num_slices +-------------+------------------+------------ + Param_Table | __region | 2 + Param_Table | time Col %#^#@$# | +(2 rows) + +-- Test multi-dimensional hypertable. The add_dimension() command +-- should be propagated to backends. +CREATE TABLE dimented_table (time timestamptz, column1 int, column2 timestamptz, column3 int); +SELECT * FROM create_distributed_hypertable('dimented_table', 'time', partitioning_column => 'column1', number_partitions => 4, replication_factor => 1, data_nodes => ARRAY[:'DATA_NODE_1']); +WARNING: only one data node was assigned to the hypertable +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------+--------- + 5 | public | dimented_table | t +(1 row) + +-- Create one chunk to block add_dimension +INSERT INTO dimented_table VALUES('2017-01-01 06:01', 1, '2017-01-01 08:01', 1); +CREATE VIEW dimented_table_slices AS +SELECT c.id AS chunk_id, c.hypertable_id, ds.dimension_id, cc.dimension_slice_id, c.schema_name AS + chunk_schema, c.table_name AS chunk_table, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c +INNER JOIN _timescaledb_catalog.hypertable h ON (c.hypertable_id = h.id) +INNER JOIN _timescaledb_catalog.dimension td ON (h.id = td.hypertable_id) +INNER JOIN _timescaledb_catalog.dimension_slice ds ON (ds.dimension_id = td.id) +INNER JOIN _timescaledb_catalog.chunk_constraint cc ON (cc.dimension_slice_id = ds.id AND cc.chunk_id = c.id) +WHERE h.table_name = 'dimented_table' +ORDER BY c.id, ds.dimension_id; +SELECT * FROM dimented_table_slices; + chunk_id | hypertable_id | dimension_id | dimension_slice_id | chunk_schema | chunk_table | range_start | range_end +----------+---------------+--------------+--------------------+-----------------------+------------------------+----------------------+------------------ + 11 | 5 | 7 | 8 | _timescaledb_internal | _dist_hyper_5_11_chunk | 1482969600000000 | 1483574400000000 + 11 | 5 | 8 | 9 | _timescaledb_internal | _dist_hyper_5_11_chunk | -9223372036854775808 | 536870911 +(2 rows) + +-- add_dimension() with existing data +SELECT * FROM add_dimension('dimented_table', 'column2', chunk_time_interval => interval '1 week'); +NOTICE: adding not-null constraint to column "column2" + dimension_id | schema_name | table_name | column_name | created +--------------+-------------+----------------+-------------+--------- + 9 | public | dimented_table | column2 | t +(1 row) + +SELECT * FROM dimented_table_slices; + chunk_id | hypertable_id | dimension_id | dimension_slice_id | chunk_schema | chunk_table | range_start | range_end +----------+---------------+--------------+--------------------+-----------------------+------------------------+----------------------+--------------------- + 11 | 5 | 7 | 8 | _timescaledb_internal | _dist_hyper_5_11_chunk | 1482969600000000 | 1483574400000000 + 11 | 5 | 8 | 9 | 
_timescaledb_internal | _dist_hyper_5_11_chunk | -9223372036854775808 | 536870911 + 11 | 5 | 9 | 10 | _timescaledb_internal | _dist_hyper_5_11_chunk | -9223372036854775808 | 9223372036854775807 +(3 rows) + +SELECT * FROM add_dimension('dimented_table', 'column3', 4, partitioning_func => '_timescaledb_functions.get_partition_for_key'); + dimension_id | schema_name | table_name | column_name | created +--------------+-------------+----------------+-------------+--------- + 10 | public | dimented_table | column3 | t +(1 row) + +SELECT * FROM dimented_table_slices; + chunk_id | hypertable_id | dimension_id | dimension_slice_id | chunk_schema | chunk_table | range_start | range_end +----------+---------------+--------------+--------------------+-----------------------+------------------------+----------------------+--------------------- + 11 | 5 | 7 | 8 | _timescaledb_internal | _dist_hyper_5_11_chunk | 1482969600000000 | 1483574400000000 + 11 | 5 | 8 | 9 | _timescaledb_internal | _dist_hyper_5_11_chunk | -9223372036854775808 | 536870911 + 11 | 5 | 9 | 10 | _timescaledb_internal | _dist_hyper_5_11_chunk | -9223372036854775808 | 9223372036854775807 + 11 | 5 | 10 | 11 | _timescaledb_internal | _dist_hyper_5_11_chunk | -9223372036854775808 | 9223372036854775807 +(4 rows) + +SELECT * FROM dimented_table ORDER BY time; + time | column1 | column2 | column3 +------------------------------+---------+------------------------------+--------- + Sun Jan 01 06:01:00 2017 PST | 1 | Sun Jan 01 08:01:00 2017 PST | 1 +(1 row) + +SELECT * FROM _timescaledb_catalog.dimension; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+------------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | + 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | | + 2 | 1 | device | integer | f | 3 | _timescaledb_functions | get_partition_hash | | | | + 5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | | + 6 | 4 | __region | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | + 7 | 5 | time | timestamp with time zone | t | | | | 604800000000 | | | + 8 | 5 | column1 | integer | f | 4 | _timescaledb_functions | get_partition_hash | | | | + 9 | 5 | column2 | timestamp with time zone | t | | | | 604800000000 | | | + 10 | 5 | column3 | integer | f | 4 | _timescaledb_functions | get_partition_for_key | | | | +(9 rows) + +SELECT * FROM attach_data_node(:'DATA_NODE_2', 'dimented_table'); + hypertable_id | node_hypertable_id | node_name +---------------+--------------------+---------------------- + 5 | 5 | db_dist_hypertable_2 +(1 row) + +SELECT * FROM _timescaledb_catalog.dimension; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+------------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------ + 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | | + 
3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | | + 2 | 1 | device | integer | f | 3 | _timescaledb_functions | get_partition_hash | | | | + 5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | | + 6 | 4 | __region | text | f | 2 | _timescaledb_functions | get_partition_hash | | | | + 7 | 5 | time | timestamp with time zone | t | | | | 604800000000 | | | + 8 | 5 | column1 | integer | f | 4 | _timescaledb_functions | get_partition_hash | | | | + 9 | 5 | column2 | timestamp with time zone | t | | | | 604800000000 | | | + 10 | 5 | column3 | integer | f | 4 | _timescaledb_functions | get_partition_for_key | | | | +(9 rows) + +-- ensure data node has new dimensions +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM _timescaledb_catalog.dimension; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM _timescaledb_catalog.dimension +NOTICE: [db_dist_hypertable_1]: +id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+----------------+------------------------+-------+----------+------------------------+---------------------+---------------+------------------------+-----------------------+---------------- + 1| 1|time |timestamp with time zone|t | | | | 604800000000| | | + 3| 2|time |timestamp with time zone|t | | | | 604800000000| | | + 2| 1|device |integer |f | 3|_timescaledb_functions |get_partition_hash | | | | + 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | | + 5| 3|__region |text |f | 4|_timescaledb_functions |get_partition_hash | | | | + 6| 4|time |timestamp with time zone|t | | | | 604800000000| | | + 7| 4|column1 |integer |f | 4|_timescaledb_functions |get_partition_hash | | | | + 8| 4|column2 |timestamp with time zone|t | | | | 604800000000| | | + 9| 4|column3 |integer |f | 4|_timescaledb_functions |get_partition_for_key| | | | +(9 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM _timescaledb_catalog.dimension +NOTICE: [db_dist_hypertable_2]: +id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+----------------+------------------------+-------+----------+------------------------+---------------------+---------------+------------------------+-----------------------+---------------- + 1| 1|time |timestamp with time zone|t | | | | 604800000000| | | + 3| 2|time |timestamp with time zone|t | | | | 604800000000| | | + 2| 1|device |integer |f | 3|_timescaledb_functions |get_partition_hash | | | | + 6| 4|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | | + 7| 4|__region |text |f | 4|_timescaledb_functions |get_partition_hash | | | | + 8| 5|time |timestamp with time zone|t | | | | 604800000000| | | + 9| 5|column1 |integer |f | 4|_timescaledb_functions |get_partition_hash | | | | +10| 5|column2 |timestamp with time zone|t | | | | 604800000000| | | +11| 5|column3 |integer |f | 4|_timescaledb_functions |get_partition_for_key| | | | +(9 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM _timescaledb_catalog.dimension +NOTICE: [db_dist_hypertable_3]: +id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func 
+--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- + 1| 1|time |timestamp with time zone|t | | | | 604800000000| | | + 3| 2|time |timestamp with time zone|t | | | | 604800000000| | | + 2| 1|device |integer |f | 3|_timescaledb_functions |get_partition_hash| | | | + 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | | + 5| 3|__region |text |f | 4|_timescaledb_functions |get_partition_hash| | | | +(5 rows) + + + remote_exec +------------- + +(1 row) + +--test per-data node queries +-- Create some chunks through insertion +CREATE TABLE disttable_replicated(time timestamptz PRIMARY KEY, device int CHECK (device > 0), temp float, "Color" int); +SELECT * FROM create_hypertable('disttable_replicated', 'time', replication_factor => 2); + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------------+--------- + 6 | public | disttable_replicated | t +(1 row) + +INSERT INTO disttable_replicated VALUES + ('2017-01-01 06:01', 1, 1.1, 1), + ('2017-01-01 08:01', 1, 1.2, 2), + ('2018-01-02 08:01', 2, 1.3, 3), + ('2019-01-01 09:11', 3, 2.1, 4), + ('2020-01-01 06:01', 5, 1.1, 10), + ('2020-01-01 08:01', 6, 1.2, 11), + ('2021-01-02 08:01', 7, 1.3, 12), + ('2022-01-01 09:11', 8, 2.1, 13); +SELECT * FROM disttable_replicated; + time | device | temp | Color +------------------------------+--------+------+------- + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 | 1 + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 | 2 + Wed Jan 01 06:01:00 2020 PST | 5 | 1.1 | 10 + Wed Jan 01 08:01:00 2020 PST | 6 | 1.2 | 11 + Tue Jan 02 08:01:00 2018 PST | 2 | 1.3 | 3 + Sat Jan 02 08:01:00 2021 PST | 7 | 1.3 | 12 + Tue Jan 01 09:11:00 2019 PST | 3 | 2.1 | 4 + Sat Jan 01 09:11:00 2022 PST | 8 | 2.1 | 13 +(8 rows) + +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +SELECT * FROM disttable_replicated; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) (actual rows=8 loops=1) + Output: disttable_replicated."time", disttable_replicated.device, disttable_replicated.temp, disttable_replicated."Color" + -> Append (actual rows=8 loops=1) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_1 (actual rows=4 loops=1) + Output: disttable_replicated_1."time", disttable_replicated_1.device, disttable_replicated_1.temp, disttable_replicated_1."Color" + Data node: db_dist_hypertable_1 + Fetcher Type: COPY + Chunks: _dist_hyper_6_12_chunk, _dist_hyper_6_15_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_2 (actual rows=2 loops=1) + Output: disttable_replicated_2."time", disttable_replicated_2.device, disttable_replicated_2.temp, disttable_replicated_2."Color" + Data node: db_dist_hypertable_2 + Fetcher Type: COPY + Chunks: _dist_hyper_6_13_chunk, _dist_hyper_6_16_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_3 
(actual rows=2 loops=1) + Output: disttable_replicated_3."time", disttable_replicated_3.device, disttable_replicated_3.temp, disttable_replicated_3."Color" + Data node: db_dist_hypertable_3 + Fetcher Type: COPY + Chunks: _dist_hyper_6_14_chunk, _dist_hyper_6_17_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) +(21 rows) + +--guc disables the optimization +SET timescaledb.enable_per_data_node_queries = FALSE; +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +SELECT * FROM disttable_replicated; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=8 loops=1) + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_12_chunk (actual rows=2 loops=1) + Output: _dist_hyper_6_12_chunk."time", _dist_hyper_6_12_chunk.device, _dist_hyper_6_12_chunk.temp, _dist_hyper_6_12_chunk."Color" + Data node: db_dist_hypertable_1 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color" FROM _timescaledb_internal._dist_hyper_6_12_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_13_chunk (actual rows=1 loops=1) + Output: _dist_hyper_6_13_chunk."time", _dist_hyper_6_13_chunk.device, _dist_hyper_6_13_chunk.temp, _dist_hyper_6_13_chunk."Color" + Data node: db_dist_hypertable_2 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color" FROM _timescaledb_internal._dist_hyper_6_13_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_14_chunk (actual rows=1 loops=1) + Output: _dist_hyper_6_14_chunk."time", _dist_hyper_6_14_chunk.device, _dist_hyper_6_14_chunk.temp, _dist_hyper_6_14_chunk."Color" + Data node: db_dist_hypertable_3 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color" FROM _timescaledb_internal._dist_hyper_6_14_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_15_chunk (actual rows=2 loops=1) + Output: _dist_hyper_6_15_chunk."time", _dist_hyper_6_15_chunk.device, _dist_hyper_6_15_chunk.temp, _dist_hyper_6_15_chunk."Color" + Data node: db_dist_hypertable_1 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color" FROM _timescaledb_internal._dist_hyper_6_15_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_16_chunk (actual rows=1 loops=1) + Output: _dist_hyper_6_16_chunk."time", _dist_hyper_6_16_chunk.device, _dist_hyper_6_16_chunk.temp, _dist_hyper_6_16_chunk."Color" + Data node: db_dist_hypertable_2 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color" FROM _timescaledb_internal._dist_hyper_6_16_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_17_chunk (actual rows=1 loops=1) + Output: _dist_hyper_6_17_chunk."time", _dist_hyper_6_17_chunk.device, _dist_hyper_6_17_chunk.temp, _dist_hyper_6_17_chunk."Color" + Data node: db_dist_hypertable_3 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color" FROM _timescaledb_internal._dist_hyper_6_17_chunk +(31 rows) + +SET timescaledb.enable_per_data_node_queries = TRUE; +--test WHERE clause +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +SELECT * FROM disttable_replicated WHERE temp > 2.0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
Custom Scan (AsyncAppend) (actual rows=2 loops=1) + Output: disttable_replicated."time", disttable_replicated.device, disttable_replicated.temp, disttable_replicated."Color" + -> Append (actual rows=2 loops=1) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_1 (actual rows=0 loops=1) + Output: disttable_replicated_1."time", disttable_replicated_1.device, disttable_replicated_1.temp, disttable_replicated_1."Color" + Data node: db_dist_hypertable_1 + Fetcher Type: COPY + Chunks: _dist_hyper_6_12_chunk, _dist_hyper_6_15_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) AND ((temp > 2::double precision)) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_2 (actual rows=0 loops=1) + Output: disttable_replicated_2."time", disttable_replicated_2.device, disttable_replicated_2.temp, disttable_replicated_2."Color" + Data node: db_dist_hypertable_2 + Fetcher Type: COPY + Chunks: _dist_hyper_6_13_chunk, _dist_hyper_6_16_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) AND ((temp > 2::double precision)) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_3 (actual rows=2 loops=1) + Output: disttable_replicated_3."time", disttable_replicated_3.device, disttable_replicated_3.temp, disttable_replicated_3."Color" + Data node: db_dist_hypertable_3 + Fetcher Type: COPY + Chunks: _dist_hyper_6_14_chunk, _dist_hyper_6_17_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) AND ((temp > 2::double precision)) +(21 rows) + +--test OR +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +SELECT * FROM disttable_replicated WHERE temp > 2.0 or "Color" = 11; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=3 loops=1) + Output: disttable_replicated."time", disttable_replicated.device, disttable_replicated.temp, disttable_replicated."Color" + -> Append (actual rows=3 loops=1) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_1 (actual rows=1 loops=1) + Output: disttable_replicated_1."time", disttable_replicated_1.device, disttable_replicated_1.temp, disttable_replicated_1."Color" + Data node: db_dist_hypertable_1 + Fetcher Type: COPY + Chunks: _dist_hyper_6_12_chunk, _dist_hyper_6_15_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) AND (((temp > 2::double precision) OR ("Color" = 11))) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_2 (actual rows=0 loops=1) + Output: disttable_replicated_2."time", disttable_replicated_2.device, disttable_replicated_2.temp, disttable_replicated_2."Color" + Data node: db_dist_hypertable_2 + Fetcher Type: COPY + Chunks: _dist_hyper_6_13_chunk, _dist_hyper_6_16_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE 
_timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) AND (((temp > 2::double precision) OR ("Color" = 11))) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_3 (actual rows=2 loops=1) + Output: disttable_replicated_3."time", disttable_replicated_3.device, disttable_replicated_3.temp, disttable_replicated_3."Color" + Data node: db_dist_hypertable_3 + Fetcher Type: COPY + Chunks: _dist_hyper_6_14_chunk, _dist_hyper_6_17_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) AND (((temp > 2::double precision) OR ("Color" = 11))) +(21 rows) + +--test some chunks excluded +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +SELECT * FROM disttable_replicated WHERE time < '2018-01-01 09:11'; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=2 loops=1) + Output: disttable_replicated."time", disttable_replicated.device, disttable_replicated.temp, disttable_replicated."Color" + -> Append (actual rows=2 loops=1) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_1 (actual rows=2 loops=1) + Output: disttable_replicated_1."time", disttable_replicated_1.device, disttable_replicated_1.temp, disttable_replicated_1."Color" + Data node: db_dist_hypertable_1 + Fetcher Type: COPY + Chunks: _dist_hyper_6_12_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6]) AND (("time" < '2018-01-01 09:11:00-08'::timestamp with time zone)) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_2 (actual rows=0 loops=1) + Output: disttable_replicated_2."time", disttable_replicated_2.device, disttable_replicated_2.temp, disttable_replicated_2."Color" + Data node: db_dist_hypertable_2 + Fetcher Type: COPY + Chunks: _dist_hyper_6_13_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6]) AND (("time" < '2018-01-01 09:11:00-08'::timestamp with time zone)) +(15 rows) + +--test all chunks excluded +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +SELECT * FROM disttable_replicated WHERE time < '2002-01-01 09:11'; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=0 loops=1) + Output: disttable_replicated."time", disttable_replicated.device, disttable_replicated.temp, disttable_replicated."Color" + One-Time Filter: false +(3 rows) + +--test cte +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +WITH cte AS ( + SELECT * FROM disttable_replicated +) +SELECT * FROM cte; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) (actual rows=8 loops=1) + Output: disttable_replicated."time", disttable_replicated.device, disttable_replicated.temp, disttable_replicated."Color" + -> Append (actual rows=8 
loops=1) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_1 (actual rows=4 loops=1) + Output: disttable_replicated_1."time", disttable_replicated_1.device, disttable_replicated_1.temp, disttable_replicated_1."Color" + Data node: db_dist_hypertable_1 + Fetcher Type: COPY + Chunks: _dist_hyper_6_12_chunk, _dist_hyper_6_15_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_2 (actual rows=2 loops=1) + Output: disttable_replicated_2."time", disttable_replicated_2.device, disttable_replicated_2.temp, disttable_replicated_2."Color" + Data node: db_dist_hypertable_2 + Fetcher Type: COPY + Chunks: _dist_hyper_6_13_chunk, _dist_hyper_6_16_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_3 (actual rows=2 loops=1) + Output: disttable_replicated_3."time", disttable_replicated_3.device, disttable_replicated_3.temp, disttable_replicated_3."Color" + Data node: db_dist_hypertable_3 + Fetcher Type: COPY + Chunks: _dist_hyper_6_14_chunk, _dist_hyper_6_17_chunk + Remote SQL: SELECT "time", device, temp, "Color" FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) +(21 rows) + +--queries that involve updates/inserts are not optimized +EXPLAIN (VERBOSE, ANALYZE, COSTS FALSE, TIMING FALSE, SUMMARY FALSE) +WITH devices AS ( + SELECT DISTINCT device FROM disttable_replicated ORDER BY device +) +UPDATE disttable_replicated SET device = 2 WHERE device = (SELECT device FROM devices LIMIT 1); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Update on public.disttable_replicated (actual rows=0 loops=1) + Update on public.disttable_replicated disttable_replicated_1 + Foreign Update on _timescaledb_internal._dist_hyper_6_12_chunk disttable_replicated_2 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_6_12_chunk SET device = $2 WHERE ctid = $1 + Foreign Update on _timescaledb_internal._dist_hyper_6_13_chunk disttable_replicated_3 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_6_13_chunk SET device = $2 WHERE ctid = $1 + Foreign Update on _timescaledb_internal._dist_hyper_6_14_chunk disttable_replicated_4 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_6_14_chunk SET device = $2 WHERE ctid = $1 + Foreign Update on _timescaledb_internal._dist_hyper_6_15_chunk disttable_replicated_5 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_6_15_chunk SET device = $2 WHERE ctid = $1 + Foreign Update on _timescaledb_internal._dist_hyper_6_16_chunk disttable_replicated_6 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_6_16_chunk SET device = $2 WHERE ctid = $1 + Foreign Update on _timescaledb_internal._dist_hyper_6_17_chunk disttable_replicated_7 + Remote SQL: UPDATE _timescaledb_internal._dist_hyper_6_17_chunk SET device = $2 WHERE ctid = $1 + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + Output: disttable_replicated_8.device + -> Unique (actual rows=1 loops=1) + Output: disttable_replicated_8.device + 
-> Custom Scan (AsyncAppend) (actual rows=1 loops=1) + Output: disttable_replicated_8.device + -> Merge Append (actual rows=1 loops=1) + Sort Key: disttable_replicated_9.device + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_9 (actual rows=1 loops=1) + Output: disttable_replicated_9.device + Data node: db_dist_hypertable_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_6_12_chunk, _dist_hyper_6_15_chunk + Remote SQL: SELECT DISTINCT device FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_10 (actual rows=1 loops=1) + Output: disttable_replicated_10.device + Data node: db_dist_hypertable_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_6_13_chunk, _dist_hyper_6_16_chunk + Remote SQL: SELECT DISTINCT device FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.disttable_replicated disttable_replicated_11 (actual rows=1 loops=1) + Output: disttable_replicated_11.device + Data node: db_dist_hypertable_3 + Fetcher Type: Cursor + Chunks: _dist_hyper_6_14_chunk, _dist_hyper_6_17_chunk + Remote SQL: SELECT DISTINCT device FROM public.disttable_replicated WHERE _timescaledb_functions.chunks_in(public.disttable_replicated.*, ARRAY[6, 8]) ORDER BY device ASC NULLS LAST + -> Result (actual rows=2 loops=1) + Output: 2, disttable_replicated.tableoid, disttable_replicated.ctid, (NULL::record) + -> Append (actual rows=2 loops=1) + -> Seq Scan on public.disttable_replicated disttable_replicated_1 (actual rows=0 loops=1) + Output: disttable_replicated_1.tableoid, disttable_replicated_1.ctid, NULL::record + Filter: (disttable_replicated_1.device = $0) + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_12_chunk disttable_replicated_2 (actual rows=2 loops=1) + Output: disttable_replicated_2.tableoid, disttable_replicated_2.ctid, disttable_replicated_2.* + Data node: db_dist_hypertable_1 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color", ctid FROM _timescaledb_internal._dist_hyper_6_12_chunk WHERE ((device = $1::integer)) + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_13_chunk disttable_replicated_3 (actual rows=0 loops=1) + Output: disttable_replicated_3.tableoid, disttable_replicated_3.ctid, disttable_replicated_3.* + Data node: db_dist_hypertable_2 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color", ctid FROM _timescaledb_internal._dist_hyper_6_13_chunk WHERE ((device = $1::integer)) + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_14_chunk disttable_replicated_4 (actual rows=0 loops=1) + Output: disttable_replicated_4.tableoid, disttable_replicated_4.ctid, disttable_replicated_4.* + Data node: db_dist_hypertable_3 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color", ctid FROM _timescaledb_internal._dist_hyper_6_14_chunk WHERE ((device = $1::integer)) + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_15_chunk disttable_replicated_5 (actual rows=0 loops=1) + Output: disttable_replicated_5.tableoid, disttable_replicated_5.ctid, disttable_replicated_5.* + Data node: db_dist_hypertable_1 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color", ctid FROM _timescaledb_internal._dist_hyper_6_15_chunk WHERE ((device = $1::integer)) + -> Foreign Scan on 
_timescaledb_internal._dist_hyper_6_16_chunk disttable_replicated_6 (actual rows=0 loops=1) + Output: disttable_replicated_6.tableoid, disttable_replicated_6.ctid, disttable_replicated_6.* + Data node: db_dist_hypertable_2 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color", ctid FROM _timescaledb_internal._dist_hyper_6_16_chunk WHERE ((device = $1::integer)) + -> Foreign Scan on _timescaledb_internal._dist_hyper_6_17_chunk disttable_replicated_7 (actual rows=0 loops=1) + Output: disttable_replicated_7.tableoid, disttable_replicated_7.ctid, disttable_replicated_7.* + Data node: db_dist_hypertable_3 + Fetcher Type: Cursor + Remote SQL: SELECT "time", device, temp, "Color", ctid FROM _timescaledb_internal._dist_hyper_6_17_chunk WHERE ((device = $1::integer)) +(77 rows) + +-- Test inserts with smaller batch size and more tuples to reach full +-- batch +SET timescaledb.max_insert_batch_size=4; +CREATE TABLE twodim (time timestamptz DEFAULT '2019-02-10 10:11', "Color" int DEFAULT 11 CHECK ("Color" > 0), temp float DEFAULT 22.1); +-- Create a replicated table to ensure we handle that case correctly +-- with batching +SELECT * FROM create_hypertable('twodim', 'time', 'Color', 3, replication_factor => 2, data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2',:'DATA_NODE_3']); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 7 | public | twodim | t +(1 row) + +SELECT * FROM twodim +ORDER BY time; + time | Color | temp +------+-------+------ +(0 rows) + +-- INSERT enough data to stretch across multiple batches per +-- data node. Also return a system column. Although we write tuples to +-- multiple data nodes, the returned tuple should only be the ones in the +-- original insert statement (without the replica tuples). 
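+-- (Sketch of the mechanics, assuming the settings above: each of the 20 rows
+-- below is written to two data nodes because replication_factor is 2, and
+-- DataNodeDispatch flushes them in groups of up to max_insert_batch_size = 4
+-- rows per node; RETURNING still reports only the 20 tuples of the original
+-- statement, with tableoid = 'twodim'::regclass confirming they are
+-- attributed to the hypertable itself.)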
+WITH result AS ( + INSERT INTO twodim VALUES + ('2017-02-01 06:01', 1, 1.1), + ('2017-02-01 08:01', 1, 1.2), + ('2018-02-02 08:01', 2, 1.3), + ('2019-02-01 09:11', 3, 2.1), + ('2019-02-02 09:11', 3, 2.1), + ('2019-02-02 10:01', 5, 1.2), + ('2019-02-03 11:11', 6, 3.5), + ('2019-02-04 08:21', 4, 6.6), + ('2019-02-04 10:11', 7, 7.4), + ('2019-02-04 12:11', 8, 2.1), + ('2019-02-05 13:31', 8, 6.3), + ('2019-02-06 02:11', 5, 1.8), + ('2019-02-06 01:13', 7, 7.9), + ('2019-02-06 19:24', 9, 5.9), + ('2019-02-07 18:44', 5, 9.7), + ('2019-02-07 20:24', 6, NULL), + ('2019-02-07 09:33', 7, 9.5), + ('2019-02-08 08:54', 1, 7.3), + ('2019-02-08 18:14', 4, 8.2), + ('2019-02-09 19:23', 8, 9.1) + RETURNING tableoid = 'twodim'::regclass AS is_tableoid, time, temp, "Color" +) SELECT * FROM result ORDER BY time; + is_tableoid | time | temp | Color +-------------+------------------------------+------+------- + t | Wed Feb 01 06:01:00 2017 PST | 1.1 | 1 + t | Wed Feb 01 08:01:00 2017 PST | 1.2 | 1 + t | Fri Feb 02 08:01:00 2018 PST | 1.3 | 2 + t | Fri Feb 01 09:11:00 2019 PST | 2.1 | 3 + t | Sat Feb 02 09:11:00 2019 PST | 2.1 | 3 + t | Sat Feb 02 10:01:00 2019 PST | 1.2 | 5 + t | Sun Feb 03 11:11:00 2019 PST | 3.5 | 6 + t | Mon Feb 04 08:21:00 2019 PST | 6.6 | 4 + t | Mon Feb 04 10:11:00 2019 PST | 7.4 | 7 + t | Mon Feb 04 12:11:00 2019 PST | 2.1 | 8 + t | Tue Feb 05 13:31:00 2019 PST | 6.3 | 8 + t | Wed Feb 06 01:13:00 2019 PST | 7.9 | 7 + t | Wed Feb 06 02:11:00 2019 PST | 1.8 | 5 + t | Wed Feb 06 19:24:00 2019 PST | 5.9 | 9 + t | Thu Feb 07 09:33:00 2019 PST | 9.5 | 7 + t | Thu Feb 07 18:44:00 2019 PST | 9.7 | 5 + t | Thu Feb 07 20:24:00 2019 PST | | 6 + t | Fri Feb 08 08:54:00 2019 PST | 7.3 | 1 + t | Fri Feb 08 18:14:00 2019 PST | 8.2 | 4 + t | Sat Feb 09 19:23:00 2019 PST | 9.1 | 8 +(20 rows) + +-- Test insert with default values and a batch size of 1. +SET timescaledb.max_insert_batch_size=1; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF) +INSERT INTO twodim DEFAULT VALUES; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable public.twodim + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + -> Insert on public.twodim + -> Custom Scan (DataNodeDispatch) + Output: 'Sun Feb 10 10:11:00 2019 PST'::timestamp with time zone, 11, '22.1'::double precision + Batch size: 1 + Remote SQL: INSERT INTO public.twodim("time", "Color", temp) VALUES ($1, $2, $3) + -> Custom Scan (ChunkDispatch) + Output: 'Sun Feb 10 10:11:00 2019 PST'::timestamp with time zone, 11, '22.1'::double precision + -> Result + Output: 'Sun Feb 10 10:11:00 2019 PST'::timestamp with time zone, 11, '22.1'::double precision +(12 rows) + +INSERT INTO twodim DEFAULT VALUES; +-- Reset the batch size +SET timescaledb.max_insert_batch_size=4; +-- Constraint violation error check +-- +-- Execute and filter mentioned data node name in the error message. +\set ON_ERROR_STOP 0 +SELECT test.execute_sql_and_filter_data_node_name_on_error($$ INSERT INTO twodim VALUES ('2019-02-10 17:54', 0, 10.2) $$, :'TEST_DBNAME'); +ERROR: [db_dist_hypertable_x]: new row for relation "_dist_hyper_7_23_chunk" violates check constraint "twodim_Color_check" +\set ON_ERROR_STOP 1 +-- Disable batching, reverting to FDW tuple-by-tuple inserts. +-- First EXPLAIN with batching turned on. 
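+-- A comparison sketch of the Remote SQL expected in the two plans that
+-- follow (not additional output):
+--   batching on (batch size 4):
+--     INSERT INTO public.twodim("time", "Color", temp) VALUES ($1, $2, $3), ..., ($10, $11, $12)
+--   batching off (max_insert_batch_size = 0):
+--     INSERT INTO public.twodim("time", "Color", temp) VALUES ($1, $2, $3)
+-- i.e. without batching the DataNodeDispatch node is dropped and every tuple
+-- is sent with its own single-row INSERT.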
+EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF) +INSERT INTO twodim VALUES + ('2019-02-10 16:23', 5, 7.1), + ('2019-02-10 17:11', 7, 3.2); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable public.twodim + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + -> Insert on public.twodim + -> Custom Scan (DataNodeDispatch) + Output: "*VALUES*".column1, "*VALUES*".column2, "*VALUES*".column3 + Batch size: 4 + Remote SQL: INSERT INTO public.twodim("time", "Color", temp) VALUES ($1, $2, $3), ..., ($10, $11, $12) + -> Custom Scan (ChunkDispatch) + Output: "*VALUES*".column1, "*VALUES*".column2, "*VALUES*".column3 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1, "*VALUES*".column2, "*VALUES*".column3 +(12 rows) + +SET timescaledb.max_insert_batch_size=0; +-- Compare without batching +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF) +INSERT INTO twodim VALUES + ('2019-02-10 16:23', 5, 7.1), + ('2019-02-10 17:11', 7, 3.2); + QUERY PLAN +---------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable public.twodim + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + Remote SQL: INSERT INTO public.twodim("time", "Color", temp) VALUES ($1, $2, $3) + -> Insert on public.twodim + -> Custom Scan (ChunkDispatch) + Output: "*VALUES*".column1, "*VALUES*".column2, "*VALUES*".column3 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1, "*VALUES*".column2, "*VALUES*".column3 +(9 rows) + +-- Insert without batching +INSERT INTO twodim VALUES + ('2019-02-10 16:23', 5, 7.1), + ('2019-02-10 17:11', 7, 3.2); +-- Check that datanodes use ChunkAppend plans with chunks_in function in the +-- "Remote SQL" when multiple dimensions are involved. 
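+-- Sketch of the remote query shape being verified here: each data node is
+-- sent something like
+--   SELECT "time", "Color", temp FROM public.twodim
+--   WHERE _timescaledb_functions.chunks_in(public.twodim.*, ARRAY[...])
+--   ORDER BY "time" ASC NULLS LAST
+-- and, with remote EXPLAIN enabled, is expected to plan it as a ChunkAppend
+-- over the listed chunks.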
+SET timescaledb.enable_remote_explain = ON; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF) +SELECT * FROM twodim +ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: twodim."time", twodim."Color", twodim.temp + -> Merge Append + Sort Key: twodim_1."time" + -> Custom Scan (DataNodeScan) on public.twodim twodim_1 + Output: twodim_1."time", twodim_1."Color", twodim_1.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_7_18_chunk, _dist_hyper_7_22_chunk, _dist_hyper_7_25_chunk + Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_functions.chunks_in(public.twodim.*, ARRAY[10, 12, 14]) ORDER BY "time" ASC NULLS LAST + Remote EXPLAIN: + Custom Scan (ChunkAppend) on public.twodim + Output: twodim."time", twodim."Color", twodim.temp + Order: twodim."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Index Scan Backward using _dist_hyper_7_18_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_18_chunk + Output: _dist_hyper_7_18_chunk."time", _dist_hyper_7_18_chunk."Color", _dist_hyper_7_18_chunk.temp + -> Index Scan Backward using _dist_hyper_7_22_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_22_chunk + Output: _dist_hyper_7_22_chunk."time", _dist_hyper_7_22_chunk."Color", _dist_hyper_7_22_chunk.temp + -> Index Scan Backward using _dist_hyper_7_25_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_25_chunk + Output: _dist_hyper_7_25_chunk."time", _dist_hyper_7_25_chunk."Color", _dist_hyper_7_25_chunk.temp + + -> Custom Scan (DataNodeScan) on public.twodim twodim_2 + Output: twodim_2."time", twodim_2."Color", twodim_2.temp + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_7_19_chunk, _dist_hyper_7_21_chunk, _dist_hyper_7_24_chunk + Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_functions.chunks_in(public.twodim.*, ARRAY[10, 11, 13]) ORDER BY "time" ASC NULLS LAST + Remote EXPLAIN: + Custom Scan (ChunkAppend) on public.twodim + Output: twodim."time", twodim."Color", twodim.temp + Order: twodim."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Index Scan Backward using _dist_hyper_7_19_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_19_chunk + Output: _dist_hyper_7_19_chunk."time", _dist_hyper_7_19_chunk."Color", _dist_hyper_7_19_chunk.temp + -> Index Scan Backward using _dist_hyper_7_21_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_21_chunk + Output: _dist_hyper_7_21_chunk."time", _dist_hyper_7_21_chunk."Color", _dist_hyper_7_21_chunk.temp + -> Index Scan Backward using _dist_hyper_7_24_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_24_chunk + Output: _dist_hyper_7_24_chunk."time", _dist_hyper_7_24_chunk."Color", _dist_hyper_7_24_chunk.temp + + -> Custom Scan (DataNodeScan) on public.twodim twodim_3 + Output: twodim_3."time", twodim_3."Color", twodim_3.temp + Data node: db_dist_hypertable_3 + Chunks: _dist_hyper_7_20_chunk, _dist_hyper_7_23_chunk + Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_functions.chunks_in(public.twodim.*, ARRAY[10, 12]) ORDER BY "time" ASC NULLS LAST + Remote EXPLAIN: + Custom Scan (ChunkAppend) on public.twodim + Output: twodim."time", twodim."Color", twodim.temp + Order: twodim."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Index Scan Backward 
using _dist_hyper_7_20_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_20_chunk + Output: _dist_hyper_7_20_chunk."time", _dist_hyper_7_20_chunk."Color", _dist_hyper_7_20_chunk.temp + -> Index Scan Backward using _dist_hyper_7_23_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_23_chunk + Output: _dist_hyper_7_23_chunk."time", _dist_hyper_7_23_chunk."Color", _dist_hyper_7_23_chunk.temp + +(56 rows) + +SET timescaledb.enable_remote_explain = OFF; +-- Check results +SELECT * FROM twodim +ORDER BY time; + time | Color | temp +------------------------------+-------+------ + Wed Feb 01 06:01:00 2017 PST | 1 | 1.1 + Wed Feb 01 08:01:00 2017 PST | 1 | 1.2 + Fri Feb 02 08:01:00 2018 PST | 2 | 1.3 + Fri Feb 01 09:11:00 2019 PST | 3 | 2.1 + Sat Feb 02 09:11:00 2019 PST | 3 | 2.1 + Sat Feb 02 10:01:00 2019 PST | 5 | 1.2 + Sun Feb 03 11:11:00 2019 PST | 6 | 3.5 + Mon Feb 04 08:21:00 2019 PST | 4 | 6.6 + Mon Feb 04 10:11:00 2019 PST | 7 | 7.4 + Mon Feb 04 12:11:00 2019 PST | 8 | 2.1 + Tue Feb 05 13:31:00 2019 PST | 8 | 6.3 + Wed Feb 06 01:13:00 2019 PST | 7 | 7.9 + Wed Feb 06 02:11:00 2019 PST | 5 | 1.8 + Wed Feb 06 19:24:00 2019 PST | 9 | 5.9 + Thu Feb 07 09:33:00 2019 PST | 7 | 9.5 + Thu Feb 07 18:44:00 2019 PST | 5 | 9.7 + Thu Feb 07 20:24:00 2019 PST | 6 | + Fri Feb 08 08:54:00 2019 PST | 1 | 7.3 + Fri Feb 08 18:14:00 2019 PST | 4 | 8.2 + Sat Feb 09 19:23:00 2019 PST | 8 | 9.1 + Sun Feb 10 10:11:00 2019 PST | 11 | 22.1 + Sun Feb 10 16:23:00 2019 PST | 5 | 7.1 + Sun Feb 10 17:11:00 2019 PST | 7 | 3.2 +(23 rows) + +SELECT count(*) FROM twodim; + count +------- + 23 +(1 row) + +-- Show distribution across data nodes +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT * FROM twodim +ORDER BY time; +SELECT count(*) FROM twodim; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT * FROM twodim +ORDER BY time +NOTICE: [db_dist_hypertable_1]: +time |Color|temp +----------------------------+-----+---- +Wed Feb 01 06:01:00 2017 PST| 1| 1.1 +Wed Feb 01 08:01:00 2017 PST| 1| 1.2 +Fri Feb 01 09:11:00 2019 PST| 3| 2.1 +Sat Feb 02 09:11:00 2019 PST| 3| 2.1 +Sun Feb 03 11:11:00 2019 PST| 6| 3.5 +Mon Feb 04 12:11:00 2019 PST| 8| 2.1 +Tue Feb 05 13:31:00 2019 PST| 8| 6.3 +Wed Feb 06 19:24:00 2019 PST| 9| 5.9 +Thu Feb 07 20:24:00 2019 PST| 6| +Fri Feb 08 08:54:00 2019 PST| 1| 7.3 +Sat Feb 09 19:23:00 2019 PST| 8| 9.1 +Sun Feb 10 10:11:00 2019 PST| 11|22.1 +(12 rows) + + +NOTICE: [db_dist_hypertable_1]: +SELECT count(*) FROM twodim +NOTICE: [db_dist_hypertable_1]: +count +----- + 12 +(1 row) + + +NOTICE: [db_dist_hypertable_2]: +SELECT * FROM twodim +ORDER BY time +NOTICE: [db_dist_hypertable_2]: +time |Color|temp +----------------------------+-----+---- +Wed Feb 01 06:01:00 2017 PST| 1| 1.1 +Wed Feb 01 08:01:00 2017 PST| 1| 1.2 +Fri Feb 02 08:01:00 2018 PST| 2| 1.3 +Sat Feb 02 10:01:00 2019 PST| 5| 1.2 +Sun Feb 03 11:11:00 2019 PST| 6| 3.5 +Mon Feb 04 08:21:00 2019 PST| 4| 6.6 +Mon Feb 04 10:11:00 2019 PST| 7| 7.4 +Mon Feb 04 12:11:00 2019 PST| 8| 2.1 +Tue Feb 05 13:31:00 2019 PST| 8| 6.3 +Wed Feb 06 01:13:00 2019 PST| 7| 7.9 +Wed Feb 06 02:11:00 2019 PST| 5| 1.8 +Thu Feb 07 09:33:00 2019 PST| 7| 9.5 +Thu Feb 07 18:44:00 2019 PST| 5| 9.7 +Thu Feb 07 20:24:00 2019 PST| 6| +Fri Feb 08 08:54:00 2019 PST| 1| 7.3 +Fri Feb 08 18:14:00 2019 PST| 4| 8.2 +Sat Feb 09 19:23:00 2019 PST| 8| 9.1 +Sun Feb 10 16:23:00 2019 PST| 5| 7.1 +Sun Feb 10 17:11:00 2019 PST| 7| 3.2 +(19 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT count(*) FROM twodim +NOTICE: 
[db_dist_hypertable_2]: +count +----- + 19 +(1 row) + + +NOTICE: [db_dist_hypertable_3]: +SELECT * FROM twodim +ORDER BY time +NOTICE: [db_dist_hypertable_3]: +time |Color|temp +----------------------------+-----+---- +Fri Feb 02 08:01:00 2018 PST| 2| 1.3 +Fri Feb 01 09:11:00 2019 PST| 3| 2.1 +Sat Feb 02 09:11:00 2019 PST| 3| 2.1 +Sat Feb 02 10:01:00 2019 PST| 5| 1.2 +Mon Feb 04 08:21:00 2019 PST| 4| 6.6 +Mon Feb 04 10:11:00 2019 PST| 7| 7.4 +Wed Feb 06 01:13:00 2019 PST| 7| 7.9 +Wed Feb 06 02:11:00 2019 PST| 5| 1.8 +Wed Feb 06 19:24:00 2019 PST| 9| 5.9 +Thu Feb 07 09:33:00 2019 PST| 7| 9.5 +Thu Feb 07 18:44:00 2019 PST| 5| 9.7 +Fri Feb 08 18:14:00 2019 PST| 4| 8.2 +Sun Feb 10 10:11:00 2019 PST| 11|22.1 +Sun Feb 10 16:23:00 2019 PST| 5| 7.1 +Sun Feb 10 17:11:00 2019 PST| 7| 3.2 +(15 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT count(*) FROM twodim +NOTICE: [db_dist_hypertable_3]: +count +----- + 15 +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Distributed table with custom type that has no binary output +CREATE TABLE disttable_with_ct(time timestamptz, txn_id rxid, val float, info text); +SELECT * FROM create_hypertable('disttable_with_ct', 'time', replication_factor => 2); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+-------------------+--------- + 8 | public | disttable_with_ct | t +(1 row) + +-- Insert data with custom type +INSERT INTO disttable_with_ct VALUES + ('2019-01-01 01:01', 'ts-1-10-20-30', 1.1, 'a'), + ('2019-01-01 01:02', 'ts-1-11-20-30', 2.0, repeat('abc', 1000000)); -- TOAST +-- Test queries on distributed table with custom type +SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; + time | txn_id | val | substring +------------------------------+---------------+-----+---------------------- + Tue Jan 01 01:01:00 2019 PST | ts-1-10-20-30 | 1.1 | a + Tue Jan 01 01:02:00 2019 PST | ts-1-11-20-30 | 2 | abcabcabcabcabcabcab +(2 rows) + +SET timescaledb.enable_connection_binary_data=false; +SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; + time | txn_id | val | substring +------------------------------+---------------+-----+---------------------- + Tue Jan 01 01:01:00 2019 PST | ts-1-10-20-30 | 1.1 | a + Tue Jan 01 01:02:00 2019 PST | ts-1-11-20-30 | 2 | abcabcabcabcabcabcab +(2 rows) + +-- Test DELETE with replication +DELETE FROM disttable_with_ct WHERE info = 'a'; +-- Check if row is gone +SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; + time | txn_id | val | substring +------------------------------+---------------+-----+---------------------- + Tue Jan 01 01:02:00 2019 PST | ts-1-11-20-30 | 2 | abcabcabcabcabcabcab +(1 row) + +-- Connect to data nodes to see if data is gone +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct +NOTICE: [db_dist_hypertable_1]: +time |txn_id |val|substring +----------------------------+-------------+---+-------------------- +Tue Jan 01 01:02:00 2019 PST|ts-1-11-20-30| 2|abcabcabcabcabcabcab +(1 row) + + +NOTICE: [db_dist_hypertable_2]: +SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct +NOTICE: [db_dist_hypertable_2]: +time|txn_id|val|substring +----+------+---+--------- +(0 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT 
time, txn_id, val, substring(info for 20) FROM disttable_with_ct +NOTICE: [db_dist_hypertable_3]: +time |txn_id |val|substring +----------------------------+-------------+---+-------------------- +Tue Jan 01 01:02:00 2019 PST|ts-1-11-20-30| 2|abcabcabcabcabcabcab +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Test single quote in names +SET SCHEMA 'single''schema'; +CREATE TABLE "disttable'quote"(time timestamptz, "device'quote" int, val float, info text); +SELECT public.create_distributed_hypertable( + 'disttable''quote', 'time', 'device''quote', data_nodes => ARRAY[:'DATA_NODE_1'] +); +WARNING: only one data node was assigned to the hypertable +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------------- + (9,single'schema,disttable'quote,t) +(1 row) + +SET SCHEMA 'public'; +CREATE TABLE disttable_drop_chunks(time timestamptz, device int CHECK (device > 0), color int, PRIMARY KEY (time,device)); +SELECT * FROM create_distributed_hypertable('disttable_drop_chunks', 'time', 'device', number_partitions => 3, replication_factor => 2); + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------------+--------- + 10 | public | disttable_drop_chunks | t +(1 row) + +INSERT INTO disttable_drop_chunks VALUES + ('2017-01-01 06:01', 1, 1.1), + ('2017-01-01 09:11', 3, 2.1), + ('2017-01-01 08:01', 1, 1.2), + ('2017-01-02 08:01', 2, 1.3), + ('2018-07-02 08:01', 87, 1.6), + ('2018-07-01 06:01', 13, 1.4), + ('2018-07-01 09:11', 90, 2.7), + ('2018-07-01 08:01', 29, 1.5); +-- Show chunks on access node +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks'); + chunk_id | hypertable_id | schema_name | table_name | relkind | slices +----------+---------------+-----------------------+-------------------------+---------+--------------------------------------------------------------------------------------------- + 27 | 10 | _timescaledb_internal | _dist_hyper_10_27_chunk | f | {"time": [1482969600000000, 1483574400000000], "device": [-9223372036854775808, 715827882]} + 28 | 10 | _timescaledb_internal | _dist_hyper_10_28_chunk | f | {"time": [1482969600000000, 1483574400000000], "device": [1431655764, 9223372036854775807]} + 29 | 10 | _timescaledb_internal | _dist_hyper_10_29_chunk | f | {"time": [1482969600000000, 1483574400000000], "device": [715827882, 1431655764]} + 30 | 10 | _timescaledb_internal | _dist_hyper_10_30_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 31 | 10 | _timescaledb_internal | _dist_hyper_10_31_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} + 32 | 10 | _timescaledb_internal | _dist_hyper_10_32_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(6 rows) + +-- Show chunks on data nodes +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks'); +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices 
+--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 16| 9|_timescaledb_internal|_dist_hyper_10_27_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [-9223372036854775808, 715827882]} + 17| 9|_timescaledb_internal|_dist_hyper_10_28_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [1431655764, 9223372036854775807]} + 18| 9|_timescaledb_internal|_dist_hyper_10_30_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 19| 9|_timescaledb_internal|_dist_hyper_10_32_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(4 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 15| 9|_timescaledb_internal|_dist_hyper_10_27_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [-9223372036854775808, 715827882]} + 16| 9|_timescaledb_internal|_dist_hyper_10_29_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [715827882, 1431655764]} + 17| 9|_timescaledb_internal|_dist_hyper_10_30_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 18| 9|_timescaledb_internal|_dist_hyper_10_31_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} +(4 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 15| 7|_timescaledb_internal|_dist_hyper_10_28_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [1431655764, 9223372036854775807]} + 16| 7|_timescaledb_internal|_dist_hyper_10_29_chunk|r |{"time": [1482969600000000, 1483574400000000], "device": [715827882, 1431655764]} + 17| 7|_timescaledb_internal|_dist_hyper_10_31_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} + 18| 7|_timescaledb_internal|_dist_hyper_10_32_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(4 rows) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM drop_chunks('disttable_drop_chunks', older_than => '2018-01-01'::timestamptz); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_27_chunk + _timescaledb_internal._dist_hyper_10_28_chunk + _timescaledb_internal._dist_hyper_10_29_chunk +(3 rows) + +SELECT * FROM disttable_drop_chunks; + time | device | color +------------------------------+--------+------- + Mon Jul 02 08:01:00 2018 PDT | 87 | 2 + Sun Jul 01 06:01:00 2018 PDT | 13 | 1 + Sun Jul 01 09:11:00 2018 PDT | 90 | 3 + Sun Jul 01 08:01:00 2018 PDT | 29 | 2 +(4 rows) + +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks'); + chunk_id | hypertable_id | schema_name | 
table_name | relkind | slices +----------+---------------+-----------------------+-------------------------+---------+--------------------------------------------------------------------------------------------- + 30 | 10 | _timescaledb_internal | _dist_hyper_10_30_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 31 | 10 | _timescaledb_internal | _dist_hyper_10_31_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} + 32 | 10 | _timescaledb_internal | _dist_hyper_10_32_chunk | f | {"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(3 rows) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks'); +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 18| 9|_timescaledb_internal|_dist_hyper_10_30_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 19| 9|_timescaledb_internal|_dist_hyper_10_32_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(2 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 17| 9|_timescaledb_internal|_dist_hyper_10_30_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [-9223372036854775808, 715827882]} + 18| 9|_timescaledb_internal|_dist_hyper_10_31_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT (_timescaledb_functions.show_chunk(show_chunks)).* +FROM show_chunks('disttable_drop_chunks') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 17| 7|_timescaledb_internal|_dist_hyper_10_31_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [715827882, 1431655764]} + 18| 7|_timescaledb_internal|_dist_hyper_10_32_chunk|r |{"time": [1530144000000000, 1530748800000000], "device": [1431655764, 9223372036854775807]} +(2 rows) + + + remote_exec +------------- + +(1 row) + +-- test passing newer_than as interval +SELECT * FROM drop_chunks('disttable_drop_chunks', newer_than => interval '10 years'); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_10_30_chunk + _timescaledb_internal._dist_hyper_10_31_chunk + _timescaledb_internal._dist_hyper_10_32_chunk +(3 rows) + +SELECT * FROM disttable_drop_chunks; + time | device | color +------+--------+------- +(0 rows) + +CREATE TABLE "weird nAme\\#^."(time bigint, device int 
CHECK (device > 0), color int, PRIMARY KEY (time,device)); +SELECT * FROM create_distributed_hypertable('"weird nAme\\#^."', 'time', 'device', 3, chunk_time_interval => 100, replication_factor => 2); + hypertable_id | schema_name | table_name | created +---------------+-------------+-----------------+--------- + 11 | public | weird nAme\\#^. | t +(1 row) + +INSERT INTO "weird nAme\\#^." VALUES + (300, 1, 1.1), + (400, 3, 2.1), + (350, 1, 1.2); +SELECT * FROM "weird nAme\\#^."; + time | device | color +------+--------+------- + 300 | 1 | 1 + 350 | 1 | 1 + 400 | 3 | 2 +(3 rows) + +-- drop chunks using bigint as time +SELECT * FROM drop_chunks('"weird nAme\\#^."', older_than => 1000); + drop_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_11_33_chunk + _timescaledb_internal._dist_hyper_11_34_chunk +(2 rows) + +SELECT * FROM "weird nAme\\#^."; + time | device | color +------+--------+------- +(0 rows) + +----------------------------------------------------------------------------------------- +-- Test that settings on hypertables are distributed to data nodes +----------------------------------------------------------------------------------------- +DROP TABLE disttable CASCADE; +CREATE TABLE disttable (time bigint, device int, temp float); +SELECT create_distributed_hypertable('disttable', 'time', chunk_time_interval => 1000000::bigint); +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (12,public,disttable,t) +(1 row) + +-- Show the dimension configuration on data nodes +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_1]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+------------------------+-----------------------+---------------- +20| 11|time |bigint |t | | | | 1000000| | | +(1 row) + + +NOTICE: [db_dist_hypertable_2]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_2]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+------------------------+-----------------------+---------------- +20| 11|time |bigint |t | | | | 1000000| | | +(1 row) + + +NOTICE: [db_dist_hypertable_3]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_3]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|compress_interval_length|integer_now_func_schema|integer_now_func 
+--+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+------------------------+-----------------------+---------------- +14| 9|time |bigint |t | | | | 1000000| | | +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Test adding a space dimension. Should apply to data nodes as +-- well. We're setting num_partitions lower than the number of servers +-- and expect a warning. +SELECT * FROM add_dimension('disttable', 'device', 1, partitioning_func => '_timescaledb_functions.get_partition_hash'); +WARNING: insufficient number of partitions for dimension "device" + dimension_id | schema_name | table_name | column_name | created +--------------+-------------+------------+-------------+--------- + 22 | public | disttable | device | t +(1 row) + +CREATE INDEX disttable_device_time_idx ON disttable (device, time); +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_1]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- +21| 11|device |integer |f | 1|_timescaledb_functions |get_partition_hash| | | | +20| 11|time |bigint |t | | | | 1000000| | | +(2 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_2]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- +21| 11|device |integer |f | 1|_timescaledb_functions |get_partition_hash| | | | +20| 11|time |bigint |t | | | | 1000000| | | +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_3]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- +15| 9|device |integer |f | 1|_timescaledb_functions |get_partition_hash| | | | +14| 9|time |bigint |t | | | | 1000000| | | +(2 rows) + + + remote_exec +------------- + +(1 row) + +-- Show that changing dimension settings apply to data nodes +SELECT * FROM set_chunk_time_interval('disttable', 2000000000::bigint); + set_chunk_time_interval +------------------------- + +(1 row) + 
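+-- Note (illustrative sketch, not captured output): the updated chunk interval can also be
+-- verified locally on the access node with the same catalog query used on the data nodes
+-- further below, e.g.:
+--   SELECT d.column_name, d.interval_length
+--   FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
+--   WHERE h.id = d.hypertable_id AND h.table_name = 'disttable';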
+SELECT * FROM set_number_partitions('disttable', 3); + set_number_partitions +----------------------- + +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now() RETURNS BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 2::BIGINT'; +CALL distributed_exec($$ +CREATE OR REPLACE FUNCTION dummy_now() RETURNS BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 2::BIGINT' +$$); +SELECT * FROM set_integer_now_func('disttable', 'dummy_now'); + set_integer_now_func +---------------------- + +(1 row) + +-- Show changes to dimensions +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; +$$); +NOTICE: [db_dist_hypertable_1]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_1]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- +21| 11|device |integer |f | 3|_timescaledb_functions |get_partition_hash| | | | +20| 11|time |bigint |t | | | | 2000000000| |public |dummy_now +(2 rows) + + +NOTICE: [db_dist_hypertable_2]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_2]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- +21| 11|device |integer |f | 3|_timescaledb_functions |get_partition_hash| | | | +20| 11|time |bigint |t | | | | 2000000000| |public |dummy_now +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: +SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d +WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' +NOTICE: [db_dist_hypertable_3]: +id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func +--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+---------------- +15| 9|device |integer |f | 3|_timescaledb_functions |get_partition_hash| | | | +14| 9|time |bigint |t | | | | 2000000000| |public |dummy_now +(2 rows) + + + remote_exec +------------- + +(1 row) + +-- Tests for using tablespaces with distributed hypertables +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +--Ensure INSERTs use DataNodeDispatch. 
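+-- (Setting enable_distributed_insert_with_copy to false below is expected to make the
+-- access node route inserts through the row-by-row DataNodeDispatch executor node rather
+-- than a remote COPY.)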
+SET timescaledb.enable_distributed_insert_with_copy=false; +CREATE TABLESPACE :TABLESPACE_1 OWNER :ROLE_1 LOCATION :'spc1path'; +CREATE TABLESPACE :TABLESPACE_2 OWNER :ROLE_1 LOCATION :'spc2path'; +\set ON_ERROR_STOP 0 +SELECT attach_tablespace(:'TABLESPACE_1', 'disttable'); +ERROR: cannot attach tablespace to distributed hypertable +SELECT detach_tablespace(:'TABLESPACE_1', 'disttable'); +ERROR: tablespace "db_dist_hypertable_1" is not attached to hypertable "disttable" +\set ON_ERROR_STOP 1 +SELECT detach_tablespaces('disttable'); + detach_tablespaces +-------------------- + 0 +(1 row) + +-- Continue to use previously attached tablespace, but block attach/detach +CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE :TABLESPACE_1; +SELECT create_distributed_hypertable('disttable2', 'time', chunk_time_interval => 1000000::bigint); +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (13,public,disttable2,t) +(1 row) + +-- Ensure that table is created on the data nodes without a tablespace +CALL distributed_exec($$ +SELECT * FROM show_tablespaces('disttable2'); +$$); +INSERT INTO disttable2 VALUES ('2017-01-01 06:01', 1, 1.1); +SELECT * FROM show_chunks('disttable2'); + show_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_13_35_chunk +(1 row) + +-- Ensure tablespace oid is set to 0 for a foreign table +SELECT reltablespace +FROM pg_class cl, (SELECT show_chunks AS chunk FROM show_chunks('disttable2')) ch +WHERE cl.oid = ch.chunk::regclass; + reltablespace +--------------- + 0 +(1 row) + +\set ON_ERROR_STOP 0 +SELECT attach_tablespace(:'TABLESPACE_2', 'disttable2'); +ERROR: cannot attach tablespace to distributed hypertable +SELECT detach_tablespace(:'TABLESPACE_2', 'disttable2'); +ERROR: tablespace "db_dist_hypertable_2" is not attached to hypertable "disttable2" +\set ON_ERROR_STOP 1 +SELECT detach_tablespaces('disttable2'); + detach_tablespaces +-------------------- + 0 +(1 row) + +SELECT * FROM show_tablespaces('disttable2'); + show_tablespaces +------------------ +(0 rows) + +-- Ensure tablespace API works for data nodes +CALL distributed_exec(format($$ +SELECT attach_tablespace(%L, 'disttable2'); +$$, :'TABLESPACE_2')); +CALL distributed_exec(format($$ +SELECT detach_tablespace(%L, 'disttable2'); +$$, :'TABLESPACE_2')); +CALL distributed_exec(format($$ +SELECT attach_tablespace(%L, 'disttable2'); +$$, :'TABLESPACE_2')); +CALL distributed_exec($$ +SELECT detach_tablespaces('disttable2'); +$$); +DROP TABLE disttable2; +CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE :TABLESPACE_1; +SELECT create_hypertable('disttable2', 'time', chunk_time_interval => 1000000::bigint, replication_factor => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------- + (14,public,disttable2,t) +(1 row) + +-- Ensure that table is created on the data nodes without a tablespace +CALL distributed_exec($$ +SELECT * FROM show_tablespaces('disttable2'); +$$); +INSERT INTO disttable2 VALUES ('2017-01-01 06:01', 1, 1.1); +SELECT * FROM show_chunks('disttable2'); + show_chunks +----------------------------------------------- + _timescaledb_internal._dist_hyper_14_36_chunk +(1 row) + +-- Ensure tablespace oid is set to 0 for a foreign table +SELECT reltablespace +FROM pg_class cl, (SELECT show_chunks AS chunk FROM show_chunks('disttable2')) ch +WHERE cl.oid = ch.chunk::regclass; + reltablespace +--------------- 
+ 0 +(1 row) + +\set ON_ERROR_STOP 0 +SELECT attach_tablespace(:'TABLESPACE_2', 'disttable2'); +ERROR: cannot attach tablespace to distributed hypertable +SELECT detach_tablespace(:'TABLESPACE_2', 'disttable2'); +ERROR: tablespace "db_dist_hypertable_2" is not attached to hypertable "disttable2" +\set ON_ERROR_STOP 1 +SELECT * FROM show_tablespaces('disttable2'); + show_tablespaces +------------------ +(0 rows) + +DROP TABLE disttable2; +DROP TABLESPACE :TABLESPACE_1; +DROP TABLESPACE :TABLESPACE_2; +-- Make sure table qualified name is used in chunks_in function. Otherwise having a table name same as a column name might yield an error +CREATE TABLE dist_device(time timestamptz, dist_device int, temp float); +SELECT * FROM create_distributed_hypertable('dist_device', 'time'); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+-------------+--------- + 15 | public | dist_device | t +(1 row) + +INSERT INTO dist_device VALUES + ('2017-01-01 06:01', 1, 1.1), + ('2017-01-01 09:11', 3, 2.1), + ('2017-01-01 08:01', 1, 1.2); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT * FROM dist_device; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (DataNodeScan) on public.dist_device + Output: dist_device."time", dist_device.dist_device, dist_device.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_15_37_chunk + Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_functions.chunks_in(public.dist_device.*, ARRAY[22]) +(5 rows) + +-- Check that datanodes use ChunkAppend plans with chunks_in function in the +-- "Remote SQL" when only time partitioning is being used. 
+SET timescaledb.enable_remote_explain = ON; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF) +SELECT "time", dist_device, temp FROM public.dist_device ORDER BY "time" ASC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) on public.dist_device + Output: dist_device."time", dist_device.dist_device, dist_device.temp + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_15_37_chunk + Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_functions.chunks_in(public.dist_device.*, ARRAY[22]) ORDER BY "time" ASC NULLS LAST + Remote EXPLAIN: + Index Scan Backward using _dist_hyper_15_37_chunk_dist_device_time_idx on _timescaledb_internal._dist_hyper_15_37_chunk + Output: _dist_hyper_15_37_chunk."time", _dist_hyper_15_37_chunk.dist_device, _dist_hyper_15_37_chunk.temp + +(9 rows) + +SELECT * FROM dist_device; + time | dist_device | temp +------------------------------+-------------+------ + Sun Jan 01 06:01:00 2017 PST | 1 | 1.1 + Sun Jan 01 09:11:00 2017 PST | 3 | 2.1 + Sun Jan 01 08:01:00 2017 PST | 1 | 1.2 +(3 rows) + +-- Test estimating relation size without stats +CREATE TABLE hyper_estimate(time timestamptz, device int, temp float); +SELECT * FROM create_distributed_hypertable('hyper_estimate', 'time', 'device', number_partitions => 3, replication_factor => 1, chunk_time_interval => INTERVAL '7 days'); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------+--------- + 16 | public | hyper_estimate | t +(1 row) + +-- This will enable us to more easily see estimates per chunk +SET timescaledb.enable_per_data_node_queries = false; +-- Estimating chunk progress uses current timestamp so we override it for test purposes +SELECT test.tsl_override_current_timestamptz('2017-11-11 00:00'::timestamptz); + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + +-- Test estimates when backfilling. 3 chunks should be historical and 3 should be considered current when estimating. +-- Note that estimate numbers are way off since we are using shared buffer size as starting point. This will not be +-- an issue in 'production' like env since chunk size should be similar to shared buffer size. 
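+-- With timescaledb.enable_per_data_node_queries disabled above, the EXPLAIN plans below
+-- show one Foreign Scan per chunk under a plain Append, which exposes the per-chunk row
+-- estimates being tested.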
+INSERT INTO hyper_estimate VALUES + ('2017-01-01 06:01', 1, 1.1), + ('2017-01-01 09:11', 1, 2.1), + ('2017-01-01 08:01', 1, 1.2), + ('2017-01-02 08:01', 1, 1.3), + ('2017-01-02 08:01', 2, 1.6), + ('2017-01-02 06:01', 2, 1.4), + ('2017-01-03 01:01', 3, 2), + ('2017-01-03 01:16', 3, 3), + ('2017-01-03 01:17', 3, 4), + ('2018-01-13 01:01', 1, 2), + ('2018-01-13 01:10', 1, 0.4), + ('2018-01-13 02:10', 2, 1.4), + ('2018-01-13 05:01', 2, 2), + ('2018-01-13 05:50', 2, 4), + ('2018-01-13 16:01', 3, 2); +-- This will calculate the stats +ANALYZE hyper_estimate; +EXPLAIN (COSTS ON) +SELECT * +FROM hyper_estimate; + QUERY PLAN +------------------------------------------------------------------------------------------ + Append (cost=10000.00..60021.38 rows=15 width=20) + -> Foreign Scan on _dist_hyper_16_38_chunk (cost=10000.00..10005.08 rows=4 width=20) + -> Foreign Scan on _dist_hyper_16_39_chunk (cost=10000.00..10003.04 rows=2 width=20) + -> Foreign Scan on _dist_hyper_16_40_chunk (cost=10000.00..10004.06 rows=3 width=20) + -> Foreign Scan on _dist_hyper_16_41_chunk (cost=10000.00..10003.04 rows=2 width=20) + -> Foreign Scan on _dist_hyper_16_42_chunk (cost=10000.00..10004.06 rows=3 width=20) + -> Foreign Scan on _dist_hyper_16_43_chunk (cost=10000.00..10002.02 rows=1 width=20) +(7 rows) + +-- Let's insert data into a new chunk. This will result in chunk creation. +INSERT INTO hyper_estimate VALUES ('2019-11-11 06:01', 1, 1.1); +-- We have stats for previous chunks so we can interpolate number of rows for the new chunk +EXPLAIN (COSTS ON) +SELECT * +FROM hyper_estimate; + QUERY PLAN +------------------------------------------------------------------------------------------ + Append (cost=10000.00..70023.31 rows=17 width=20) + -> Foreign Scan on _dist_hyper_16_38_chunk (cost=10000.00..10005.08 rows=4 width=20) + -> Foreign Scan on _dist_hyper_16_39_chunk (cost=10000.00..10003.04 rows=2 width=20) + -> Foreign Scan on _dist_hyper_16_40_chunk (cost=10000.00..10004.06 rows=3 width=20) + -> Foreign Scan on _dist_hyper_16_41_chunk (cost=10000.00..10003.04 rows=2 width=20) + -> Foreign Scan on _dist_hyper_16_42_chunk (cost=10000.00..10004.06 rows=3 width=20) + -> Foreign Scan on _dist_hyper_16_43_chunk (cost=10000.00..10002.02 rows=1 width=20) + -> Foreign Scan on _dist_hyper_16_44_chunk (cost=10000.00..10001.93 rows=2 width=20) +(8 rows) + +CREATE TABLE devices ( + device_id INTEGER PRIMARY KEY, + device_name VARCHAR(10) +); +CALL distributed_exec($$ + CREATE TABLE devices(device_id INTEGER PRIMARY KEY, device_name VARCHAR(10)) +$$); +INSERT INTO devices VALUES + (1, 'A001'), (2, 'B015'), (3, 'D821'), (4, 'C561'), (5, 'D765'); +CALL distributed_exec($$ + INSERT INTO devices VALUES + (1, 'A001'), (2, 'B015'), (3, 'D821'), (4, 'C561'), (5, 'D765') +$$); +CREATE TABLE hyper ( + time TIMESTAMPTZ NOT NULL, + device INTEGER REFERENCES devices(device_id), + temp FLOAT +); +SELECT * FROM create_distributed_hypertable('hyper', 'time', 'device', 3, + chunk_time_interval => interval '18 hours' +); +WARNING: distributed hypertable "hyper" has a foreign key to a non-distributed table + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 17 | public | hyper | t +(1 row) + +-- Inserting some values should succeed. 
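+-- (The referenced devices rows were also created on each data node via distributed_exec,
+-- so the foreign key check passes on the node that stores each chunk; a device added only
+-- on the access node later causes the insert to fail, as shown further below.)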
+INSERT INTO hyper VALUES + ('2017-01-01 06:01', 1, 1.1), + ('2017-01-01 09:11', 1, 2.1), + ('2017-01-01 08:01', 1, 1.2), + ('2017-01-02 08:01', 1, 1.3), + ('2017-01-02 08:01', 2, 1.6), + ('2017-01-02 06:01', 2, 1.4), + ('2017-01-03 01:01', 3, 2), + ('2017-01-03 01:16', 3, 3), + ('2017-01-03 01:17', 3, 4), + ('2018-01-13 01:01', 1, 2), + ('2018-01-13 01:10', 1, 0.4), + ('2018-01-13 02:10', 2, 1.4), + ('2018-01-13 05:01', 2, 2), + ('2018-01-13 05:50', 2, 4), + ('2018-01-13 16:01', 3, 2); +SELECT time_bucket('3 hours', time) AS time, device, avg(temp) AS avg_temp +FROM hyper +GROUP BY 1, 2 +HAVING avg(temp) > 1.2 +ORDER BY 1; + time | device | avg_temp +------------------------------+--------+---------- + Sun Jan 01 07:00:00 2017 PST | 1 | 1.65 + Mon Jan 02 04:00:00 2017 PST | 2 | 1.4 + Mon Jan 02 07:00:00 2017 PST | 2 | 1.6 + Mon Jan 02 07:00:00 2017 PST | 1 | 1.3 + Tue Jan 03 01:00:00 2017 PST | 3 | 3 + Sat Jan 13 01:00:00 2018 PST | 2 | 1.4 + Sat Jan 13 04:00:00 2018 PST | 2 | 3 + Sat Jan 13 16:00:00 2018 PST | 3 | 2 +(8 rows) + +-- Add some devices on the access node only. Inserts should then fail. +INSERT INTO devices VALUES (6, 'E999'); +\set ON_ERROR_STOP 0 +INSERT INTO hyper VALUES ('2017-01-01 06:01', 6, 1.1); +ERROR: [db_dist_hypertable_1]: insert or update on table "_dist_hyper_17_45_chunk" violates foreign key constraint "26_17_hyper_device_fkey" +\set ON_ERROR_STOP 1 +-- Test alter replication factor with data +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper'); +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 26| 16|_timescaledb_internal|_dist_hyper_17_45_chunk|r |{"time": [1483272000000000, 1483336800000000], "device": [-9223372036854775808, 715827882]} + 27| 16|_timescaledb_internal|_dist_hyper_17_46_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [-9223372036854775808, 715827882]} + 28| 16|_timescaledb_internal|_dist_hyper_17_49_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [-9223372036854775808, 715827882]} +(3 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+--------------------------------------------------------------------------------- + 23| 16|_timescaledb_internal|_dist_hyper_17_47_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [715827882, 1431655764]} + 24| 16|_timescaledb_internal|_dist_hyper_17_50_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [715827882, 1431655764]} +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 
14|_timescaledb_internal|_dist_hyper_17_48_chunk|r |{"time": [1483401600000000, 1483466400000000], "device": [1431655764, 9223372036854775807]} + 24| 14|_timescaledb_internal|_dist_hyper_17_51_chunk|r |{"time": [1515866400000000, 1515931200000000], "device": [1431655764, 9223372036854775807]} +(2 rows) + + + remote_exec +------------- + +(1 row) + +-- Dimension partitions should be updated to account for replication +-- to additional data nodes +SELECT * FROM hypertable_partitions WHERE table_name = 'hyper'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------ + hyper | 29 | -9223372036854775808 | {db_dist_hypertable_1} + hyper | 29 | 715827882 | {db_dist_hypertable_2} + hyper | 29 | 1431655764 | {db_dist_hypertable_3} +(3 rows) + +SELECT * FROM set_replication_factor('hyper', 3); +WARNING: hypertable "hyper" is under-replicated + set_replication_factor +------------------------ + +(1 row) + +SELECT * FROM hypertable_partitions WHERE table_name = 'hyper'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+------------------------------------------------------------------ + hyper | 29 | -9223372036854775808 | {db_dist_hypertable_1,db_dist_hypertable_2,db_dist_hypertable_3} + hyper | 29 | 715827882 | {db_dist_hypertable_2,db_dist_hypertable_3,db_dist_hypertable_1} + hyper | 29 | 1431655764 | {db_dist_hypertable_3,db_dist_hypertable_1,db_dist_hypertable_2} +(3 rows) + +INSERT INTO hyper VALUES ('2017-01-02 07:11', 1, 1.7); +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper'); +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 26| 16|_timescaledb_internal|_dist_hyper_17_45_chunk|r |{"time": [1483272000000000, 1483336800000000], "device": [-9223372036854775808, 715827882]} + 27| 16|_timescaledb_internal|_dist_hyper_17_46_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [-9223372036854775808, 715827882]} + 28| 16|_timescaledb_internal|_dist_hyper_17_49_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [-9223372036854775808, 715827882]} +(3 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+--------------------------------------------------------------------------------- + 23| 16|_timescaledb_internal|_dist_hyper_17_47_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [715827882, 1431655764]} + 24| 16|_timescaledb_internal|_dist_hyper_17_50_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [715827882, 1431655764]} +(2 rows) + + +NOTICE: [db_dist_hypertable_3]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices 
+--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 14|_timescaledb_internal|_dist_hyper_17_48_chunk|r |{"time": [1483401600000000, 1483466400000000], "device": [1431655764, 9223372036854775807]} + 24| 14|_timescaledb_internal|_dist_hyper_17_51_chunk|r |{"time": [1515866400000000, 1515931200000000], "device": [1431655764, 9223372036854775807]} +(2 rows) + + + remote_exec +------------- + +(1 row) + +INSERT INTO hyper VALUES ('2017-02-01 06:01', 1, 5.1); +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper'); +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 26| 16|_timescaledb_internal|_dist_hyper_17_45_chunk|r |{"time": [1483272000000000, 1483336800000000], "device": [-9223372036854775808, 715827882]} + 27| 16|_timescaledb_internal|_dist_hyper_17_46_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [-9223372036854775808, 715827882]} + 28| 16|_timescaledb_internal|_dist_hyper_17_49_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [-9223372036854775808, 715827882]} + 29| 16|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} +(4 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 16|_timescaledb_internal|_dist_hyper_17_47_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [715827882, 1431655764]} + 24| 16|_timescaledb_internal|_dist_hyper_17_50_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [715827882, 1431655764]} + 25| 16|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} +(3 rows) + + +NOTICE: [db_dist_hypertable_3]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 14|_timescaledb_internal|_dist_hyper_17_48_chunk|r |{"time": [1483401600000000, 1483466400000000], "device": [1431655764, 9223372036854775807]} + 24| 14|_timescaledb_internal|_dist_hyper_17_51_chunk|r |{"time": [1515866400000000, 1515931200000000], "device": [1431655764, 9223372036854775807]} + 25| 14|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} +(3 rows) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM set_replication_factor('hyper', 2); 
+WARNING: hypertable "hyper" is under-replicated + set_replication_factor +------------------------ + +(1 row) + +SELECT * FROM hypertable_partitions WHERE table_name = 'hyper'; + table_name | dimension_id | range_start | data_nodes +------------+--------------+----------------------+--------------------------------------------- + hyper | 29 | -9223372036854775808 | {db_dist_hypertable_1,db_dist_hypertable_2} + hyper | 29 | 715827882 | {db_dist_hypertable_2,db_dist_hypertable_3} + hyper | 29 | 1431655764 | {db_dist_hypertable_3,db_dist_hypertable_1} +(3 rows) + +INSERT INTO hyper VALUES ('2017-03-01 06:01', 1, 15.1); +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper'); +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 26| 16|_timescaledb_internal|_dist_hyper_17_45_chunk|r |{"time": [1483272000000000, 1483336800000000], "device": [-9223372036854775808, 715827882]} + 27| 16|_timescaledb_internal|_dist_hyper_17_46_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [-9223372036854775808, 715827882]} + 28| 16|_timescaledb_internal|_dist_hyper_17_49_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [-9223372036854775808, 715827882]} + 29| 16|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} + 30| 16|_timescaledb_internal|_dist_hyper_17_53_chunk|r |{"time": [1488326400000000, 1488391200000000], "device": [-9223372036854775808, 715827882]} +(5 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 16|_timescaledb_internal|_dist_hyper_17_47_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [715827882, 1431655764]} + 24| 16|_timescaledb_internal|_dist_hyper_17_50_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [715827882, 1431655764]} + 25| 16|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} + 26| 16|_timescaledb_internal|_dist_hyper_17_53_chunk|r |{"time": [1488326400000000, 1488391200000000], "device": [-9223372036854775808, 715827882]} +(4 rows) + + +NOTICE: [db_dist_hypertable_3]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 14|_timescaledb_internal|_dist_hyper_17_48_chunk|r |{"time": [1483401600000000, 1483466400000000], "device": [1431655764, 9223372036854775807]} + 24| 14|_timescaledb_internal|_dist_hyper_17_51_chunk|r |{"time": 
[1515866400000000, 1515931200000000], "device": [1431655764, 9223372036854775807]} + 25| 14|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} +(3 rows) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM set_replication_factor('hyper', replication_factor => 2); +WARNING: hypertable "hyper" is under-replicated + set_replication_factor +------------------------ + +(1 row) + +INSERT INTO hyper VALUES ('2017-04-01 06:01', 2, 45.1); +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper'); +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_1]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 26| 16|_timescaledb_internal|_dist_hyper_17_45_chunk|r |{"time": [1483272000000000, 1483336800000000], "device": [-9223372036854775808, 715827882]} + 27| 16|_timescaledb_internal|_dist_hyper_17_46_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [-9223372036854775808, 715827882]} + 28| 16|_timescaledb_internal|_dist_hyper_17_49_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [-9223372036854775808, 715827882]} + 29| 16|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} + 30| 16|_timescaledb_internal|_dist_hyper_17_53_chunk|r |{"time": [1488326400000000, 1488391200000000], "device": [-9223372036854775808, 715827882]} +(5 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_2]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 16|_timescaledb_internal|_dist_hyper_17_47_chunk|r |{"time": [1483336800000000, 1483401600000000], "device": [715827882, 1431655764]} + 24| 16|_timescaledb_internal|_dist_hyper_17_50_chunk|r |{"time": [1515801600000000, 1515866400000000], "device": [715827882, 1431655764]} + 25| 16|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} + 26| 16|_timescaledb_internal|_dist_hyper_17_53_chunk|r |{"time": [1488326400000000, 1488391200000000], "device": [-9223372036854775808, 715827882]} + 27| 16|_timescaledb_internal|_dist_hyper_17_54_chunk|r |{"time": [1491048000000000, 1491112800000000], "device": [715827882, 1431655764]} +(5 rows) + + +NOTICE: [db_dist_hypertable_3]: + SELECT (_timescaledb_functions.show_chunk(show_chunks)).* + FROM show_chunks('hyper') +NOTICE: [db_dist_hypertable_3]: +chunk_id|hypertable_id|schema_name |table_name |relkind|slices +--------+-------------+---------------------+-----------------------+-------+------------------------------------------------------------------------------------------- + 23| 14|_timescaledb_internal|_dist_hyper_17_48_chunk|r |{"time": [1483401600000000, 1483466400000000], "device": [1431655764, 9223372036854775807]} + 24| 
14|_timescaledb_internal|_dist_hyper_17_51_chunk|r |{"time": [1515866400000000, 1515931200000000], "device": [1431655764, 9223372036854775807]} + 25| 14|_timescaledb_internal|_dist_hyper_17_52_chunk|r |{"time": [1485928800000000, 1485993600000000], "device": [-9223372036854775808, 715827882]} + 26| 14|_timescaledb_internal|_dist_hyper_17_54_chunk|r |{"time": [1491048000000000, 1491112800000000], "device": [715827882, 1431655764]} +(4 rows) + + + remote_exec +------------- + +(1 row) + +\set ON_ERROR_STOP 0 +SELECT * FROM set_replication_factor('hyper', replication_factor => 4); +ERROR: replication factor too large for hypertable "hyper" +\set ON_ERROR_STOP 1 +DROP TABLE hyper; +CALL distributed_exec($$ + DROP TABLE devices; +$$); +DROP TABLE devices; +-- Test storage options are distributed to data nodes +-- +-- Make sure that options used during CREATE TABLE WITH and CREATE INDEX WITH +-- are properly distributed. +-- +CREATE TABLE disttable_with_relopts_1(time timestamptz NOT NULL, device int) WITH (fillfactor=10); +CREATE TABLE disttable_with_relopts_2(time timestamptz NOT NULL, device int) WITH (fillfactor=10, parallel_workers=1); +CREATE TABLE disttable_with_relopts_3(time timestamptz NOT NULL, device int); +CREATE INDEX disttable_with_relopts_3_idx ON disttable_with_relopts_3(device) WITH (fillfactor=20); +SELECT * FROM create_distributed_hypertable('disttable_with_relopts_1', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------------------+--------- + 18 | public | disttable_with_relopts_1 | t +(1 row) + +SELECT * FROM create_distributed_hypertable('disttable_with_relopts_2', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------------------+--------- + 19 | public | disttable_with_relopts_2 | t +(1 row) + +SELECT * FROM create_distributed_hypertable('disttable_with_relopts_3', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------------------+--------- + 20 | public | disttable_with_relopts_3 | t +(1 row) + +INSERT INTO disttable_with_relopts_1 VALUES + ('2017-01-01 06:01', 1), + ('2017-01-01 09:11', 3), + ('2017-01-01 08:01', 1), + ('2017-01-02 08:01', 2), + ('2018-07-02 08:01', 87); +INSERT INTO disttable_with_relopts_2 VALUES + ('2017-01-01 06:01', 1), + ('2017-01-01 09:11', 3), + ('2017-01-01 08:01', 1), + ('2017-01-02 08:01', 2), + ('2018-07-02 08:01', 87); +SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname; + relname | reloptions +--------------------------+----------------- + disttable_with_relopts_1 | {fillfactor=10} +(1 row) + +SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname; + relname | reloptions +--------------------------+------------------------------------ + disttable_with_relopts_2 | {fillfactor=10,parallel_workers=1} +(1 row) + +SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3' ORDER BY relname; + relname | reloptions +--------------------------+------------ + disttable_with_relopts_3 | +(1 row) + +SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname; + relname | reloptions +------------------------------+----------------- + disttable_with_relopts_3_idx | {fillfactor=20} +(1 row) + +-- Ensure reloptions are not set for distributed hypertable chunks on the AN +SELECT relname, reloptions FROM pg_class WHERE relname IN 
+(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) +ORDER BY relname; + relname | reloptions +-------------------------+------------ + _dist_hyper_18_55_chunk | + _dist_hyper_18_56_chunk | +(2 rows) + +SELECT relname, reloptions FROM pg_class WHERE relname IN +(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) +ORDER BY relname; + relname | reloptions +-------------------------+------------ + _dist_hyper_19_57_chunk | + _dist_hyper_19_58_chunk | +(2 rows) + +-- Ensure parent tables has proper storage options +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname +NOTICE: [db_dist_hypertable_1]: +relname |reloptions +------------------------+--------------- +disttable_with_relopts_1|{fillfactor=10} +(1 row) + + +NOTICE: [db_dist_hypertable_2]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname +NOTICE: [db_dist_hypertable_2]: +relname |reloptions +------------------------+--------------- +disttable_with_relopts_1|{fillfactor=10} +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname +NOTICE: [db_dist_hypertable_3]: +relname |reloptions +------------------------+--------------- +disttable_with_relopts_1|{fillfactor=10} +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname +NOTICE: [db_dist_hypertable_1]: +relname |reloptions +------------------------+---------------------------------- +disttable_with_relopts_2|{fillfactor=10,parallel_workers=1} +(1 row) + + +NOTICE: [db_dist_hypertable_2]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname +NOTICE: [db_dist_hypertable_2]: +relname |reloptions +------------------------+---------------------------------- +disttable_with_relopts_2|{fillfactor=10,parallel_workers=1} +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname +NOTICE: [db_dist_hypertable_3]: +relname |reloptions +------------------------+---------------------------------- +disttable_with_relopts_2|{fillfactor=10,parallel_workers=1} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Ensure index has proper storage options set +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname +NOTICE: [db_dist_hypertable_1]: +relname |reloptions +----------------------------+--------------- +disttable_with_relopts_3_idx|{fillfactor=20} +(1 row) + + +NOTICE: [db_dist_hypertable_2]: + SELECT 
relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname +NOTICE: [db_dist_hypertable_2]: +relname |reloptions +----------------------------+--------------- +disttable_with_relopts_3_idx|{fillfactor=20} +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname +NOTICE: [db_dist_hypertable_3]: +relname |reloptions +----------------------------+--------------- +disttable_with_relopts_3_idx|{fillfactor=20} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Make sure chunks derive parent reloptions +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_1]: +relname |reloptions +-----------------------+--------------- +_dist_hyper_18_55_chunk|{fillfactor=10} +(1 row) + + +NOTICE: [db_dist_hypertable_2]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_2]: +relname |reloptions +-----------------------+--------------- +_dist_hyper_18_56_chunk|{fillfactor=10} +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_3]: +relname|reloptions +-------+---------- +(0 rows) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) + ORDER BY relname; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) + ORDER BY relname +NOTICE: [db_dist_hypertable_1]: +relname|reloptions +-------+---------- +(0 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) + ORDER BY relname +NOTICE: [db_dist_hypertable_2]: +relname |reloptions +-----------------------+---------------------------------- +_dist_hyper_19_57_chunk|{fillfactor=10,parallel_workers=1} +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) + ORDER BY relname +NOTICE: [db_dist_hypertable_3]: +relname |reloptions +-----------------------+---------------------------------- +_dist_hyper_19_58_chunk|{fillfactor=10,parallel_workers=1} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- ALTER TABLE SET/RESET 
support for distributed hypertable +-- +-- SET +ALTER TABLE disttable_with_relopts_1 SET (fillfactor=40); +SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname; + relname | reloptions +--------------------------+----------------- + disttable_with_relopts_1 | {fillfactor=40} +(1 row) + +-- Ensure chunks are not affected on the AN +SELECT relname, reloptions FROM pg_class WHERE relname IN +(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) +ORDER BY relname; + relname | reloptions +-------------------------+------------ + _dist_hyper_18_55_chunk | + _dist_hyper_18_56_chunk | +(2 rows) + +-- Ensure data node chunks has proper options set +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_1]: +relname |reloptions +-----------------------+--------------- +_dist_hyper_18_55_chunk|{fillfactor=40} +(1 row) + + +NOTICE: [db_dist_hypertable_2]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_2]: +relname |reloptions +-----------------------+--------------- +_dist_hyper_18_56_chunk|{fillfactor=40} +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_3]: +relname|reloptions +-------+---------- +(0 rows) + + + remote_exec +------------- + +(1 row) + +-- RESET +ALTER TABLE disttable_with_relopts_1 RESET (fillfactor); +SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname; + relname | reloptions +--------------------------+------------ + disttable_with_relopts_1 | +(1 row) + +-- Ensure chunks are not affected on the AN +SELECT relname, reloptions FROM pg_class WHERE relname IN +(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) +ORDER BY relname; + relname | reloptions +-------------------------+------------ + _dist_hyper_18_55_chunk | + _dist_hyper_18_56_chunk | +(2 rows) + +-- Ensure data node chunks has proper options set +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_1]: +relname |reloptions +-----------------------+---------- +_dist_hyper_18_55_chunk| +(1 row) + + +NOTICE: [db_dist_hypertable_2]: + SELECT relname, 
reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_2]: +relname |reloptions +-----------------------+---------- +_dist_hyper_18_56_chunk| +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT relname, reloptions FROM pg_class WHERE relname IN + (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) + ORDER BY relname +NOTICE: [db_dist_hypertable_3]: +relname|reloptions +-------+---------- +(0 rows) + + + remote_exec +------------- + +(1 row) + +DROP TABLE disttable_with_relopts_1; +DROP TABLE disttable_with_relopts_2; +DROP TABLE disttable_with_relopts_3; +-- Test SERIAL type column support for distributed hypertables +-- +CREATE TABLE disttable_serial(time timestamptz NOT NULL, device int, id1 SERIAL, id2 SMALLSERIAL, id3 BIGSERIAL); +SELECT create_distributed_hypertable('disttable_serial', 'time', 'device'); + create_distributed_hypertable +-------------------------------- + (21,public,disttable_serial,t) +(1 row) + +-- Show created columns (AN and DN tables must be exact) +SELECT * FROM test.show_columns('disttable_serial'); + Column | Type | NotNull +--------+--------------------------+--------- + time | timestamp with time zone | t + device | integer | f + id1 | integer | t + id2 | smallint | t + id3 | bigint | t +(5 rows) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT * FROM test.show_columns('disttable_serial'); +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT * FROM test.show_columns('disttable_serial') +NOTICE: [db_dist_hypertable_1]: +Column|Type |NotNull +------+------------------------+------- +time |timestamp with time zone|t +device|integer |f +id1 |integer |t +id2 |smallint |t +id3 |bigint |t +(5 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT * FROM test.show_columns('disttable_serial') +NOTICE: [db_dist_hypertable_2]: +Column|Type |NotNull +------+------------------------+------- +time |timestamp with time zone|t +device|integer |f +id1 |integer |t +id2 |smallint |t +id3 |bigint |t +(5 rows) + + +NOTICE: [db_dist_hypertable_3]: + SELECT * FROM test.show_columns('disttable_serial') +NOTICE: [db_dist_hypertable_3]: +Column|Type |NotNull +------+------------------------+------- +time |timestamp with time zone|t +device|integer |f +id1 |integer |t +id2 |smallint |t +id3 |bigint |t +(5 rows) + + + remote_exec +------------- + +(1 row) + +-- Ensure DEFAULT expression is applied on the AN only +SELECT column_name, column_default +FROM information_schema.columns +WHERE table_name = 'disttable_serial'; + column_name | column_default +-------------+----------------------------------------------- + time | + device | + id1 | nextval('disttable_serial_id1_seq'::regclass) + id2 | nextval('disttable_serial_id2_seq'::regclass) + id3 | nextval('disttable_serial_id3_seq'::regclass) +(5 rows) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT column_name, column_default + FROM information_schema.columns + WHERE table_name = 'disttable_serial'; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT column_name, column_default + FROM information_schema.columns + WHERE table_name = 'disttable_serial' +NOTICE: [db_dist_hypertable_1]: +column_name|column_default +-----------+-------------- +time | +device | +id1 | +id2 | +id3 | +(5 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT 
column_name, column_default + FROM information_schema.columns + WHERE table_name = 'disttable_serial' +NOTICE: [db_dist_hypertable_2]: +column_name|column_default +-----------+-------------- +time | +device | +id1 | +id2 | +id3 | +(5 rows) + + +NOTICE: [db_dist_hypertable_3]: + SELECT column_name, column_default + FROM information_schema.columns + WHERE table_name = 'disttable_serial' +NOTICE: [db_dist_hypertable_3]: +column_name|column_default +-----------+-------------- +time | +device | +id1 | +id2 | +id3 | +(5 rows) + + + remote_exec +------------- + +(1 row) + +-- Ensure sequences were created on the AN only +INSERT INTO disttable_serial VALUES + ('2017-01-01 06:01', 1), + ('2017-01-01 09:11', 3), + ('2017-01-01 08:01', 1), + ('2017-01-02 08:01', 2), + ('2018-07-02 08:01', 87); +SELECT currval('disttable_serial_id1_seq'::regclass), + currval('disttable_serial_id2_seq'::regclass), + currval('disttable_serial_id3_seq'::regclass); + currval | currval | currval +---------+---------+--------- + 5 | 5 | 5 +(1 row) + +\set ON_ERROR_STOP 0 +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ + SELECT currval('disttable_serial_id1_seq'::regclass); +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT currval('disttable_serial_id1_seq'::regclass) +ERROR: [db_dist_hypertable_1]: relation "disttable_serial_id1_seq" does not exist +\set ON_ERROR_STOP 1 +-- Verify that the data is getting spread over multiple data nodes with the +-- serial values being set correctly +SELECT * from disttable_serial ORDER BY id1; + time | device | id1 | id2 | id3 +------------------------------+--------+-----+-----+----- + Sun Jan 01 06:01:00 2017 PST | 1 | 1 | 1 | 1 + Sun Jan 01 09:11:00 2017 PST | 3 | 2 | 2 | 2 + Sun Jan 01 08:01:00 2017 PST | 1 | 3 | 3 | 3 + Mon Jan 02 08:01:00 2017 PST | 2 | 4 | 4 | 4 + Mon Jul 02 08:01:00 2018 PDT | 87 | 5 | 5 | 5 +(5 rows) + +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + SELECT * from disttable_serial ORDER BY id1; +$$); +NOTICE: [db_dist_hypertable_1]: + SELECT * from disttable_serial ORDER BY id1 +NOTICE: [db_dist_hypertable_1]: +time |device|id1|id2|id3 +----------------------------+------+---+---+--- +Sun Jan 01 06:01:00 2017 PST| 1| 1| 1| 1 +Sun Jan 01 08:01:00 2017 PST| 1| 3| 3| 3 +Mon Jul 02 08:01:00 2018 PDT| 87| 5| 5| 5 +(3 rows) + + +NOTICE: [db_dist_hypertable_2]: + SELECT * from disttable_serial ORDER BY id1 +NOTICE: [db_dist_hypertable_2]: +time |device|id1|id2|id3 +----------------------------+------+---+---+--- +Mon Jan 02 08:01:00 2017 PST| 2| 4| 4| 4 +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + SELECT * from disttable_serial ORDER BY id1 +NOTICE: [db_dist_hypertable_3]: +time |device|id1|id2|id3 +----------------------------+------+---+---+--- +Sun Jan 01 09:11:00 2017 PST| 3| 2| 2| 2 +(1 row) + + + remote_exec +------------- + +(1 row) + +DROP TABLE disttable_serial; +-- Test insert batching case which will hit the limit of arguments for +-- prepared statements (65k). 
+-- +-- Issue: #1702 +-- distributed hypertable insert fails when # of columns are more than 65 +-- +-- Use default value +SET timescaledb.max_insert_batch_size TO 1000; +CREATE TABLE test_1702 ( + id varchar(100) NOT NULL, + time timestamp NOT NULL, + dummy1 int , + dummy2 int , + dummy4 int , + dummy5 int , + dummy6 int , + dummy7 int , + dummy8 int , + dummy9 int , + dummy10 int , + dummy11 int , + dummy12 int , + dummy13 int , + dummy14 int , + dummy15 int , + dummy16 int , + dummy17 int , + dummy18 int , + dummy19 int , + dummy20 int , + dummy21 int , + dummy22 int , + dummy23 int , + dummy24 int , + dummy25 int , + dummy26 int , + dummy27 int , + dummy28 int , + dummy29 int , + dummy30 int , + dummy31 int , + dummy32 int , + dummy33 int , + dummy34 int , + dummy35 int , + dummy36 int , + dummy37 int , + dummy38 int , + dummy39 int , + dummy40 int , + dummy41 int , + dummy42 int , + dummy43 int , + dummy44 int , + dummy45 int , + dummy46 int , + dummy47 int , + dummy48 int , + dummy49 int , + dummy50 int , + dummy51 int , + dummy52 int , + dummy53 int , + dummy54 int , + dummy55 int , + dummy56 int , + dummy57 int , + dummy58 int , + dummy59 int , + dummy60 int , + dummy61 int , + dummy62 int , + dummy63 int , + dummy64 int , + dummy65 int , + dummy66 int , + dummy67 int , + dummy68 int , + dummy69 int , + dummy70 int , + dummy71 int +); +SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: column type "character varying" used for "id" does not follow best practices +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_distributed_hypertable +------------------------------- + (22,public,test_1702,t) +(1 row) + +-- Original issue case +-- +-- Expect batch size to be lower than defined max_insert_batch_size +-- +EXPLAIN (COSTS OFF) INSERT INTO test_1702(id, time) VALUES('1', current_timestamp); + QUERY PLAN +----------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable test_1702 + -> Insert on test_1702 + -> Custom Scan (DataNodeDispatch) + Batch size: 910 + -> Custom Scan (ChunkDispatch) + -> Result +(7 rows) + +INSERT INTO test_1702(id, time) VALUES('1', current_timestamp); +EXPLAIN (COSTS OFF) INSERT INTO test_1702(id, time) SELECT generate_series(2, 1500), current_timestamp; + QUERY PLAN +----------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable test_1702 + -> Insert on test_1702 + -> Custom Scan (DataNodeDispatch) + Batch size: 910 + -> Custom Scan (ChunkDispatch) + -> Subquery Scan on "*SELECT*" + -> ProjectSet + -> Result +(9 rows) + +INSERT INTO test_1702(id, time) SELECT generate_series(2, 1500), current_timestamp; +SELECT count(*) from test_1702; + count +------- + 1500 +(1 row) + +DROP TABLE test_1702; +-- +-- Expect batch size to be similair to max_insert_batch_size +-- +CREATE TABLE test_1702 ( + id varchar(100) NOT NULL, + time timestamp NOT NULL, + dummy1 int , + dummy2 int , + dummy4 int , + dummy5 int + ); +SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: column type "character varying" used for "id" does not follow best practices +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_distributed_hypertable +------------------------------- + (23,public,test_1702,t) +(1 row) + +EXPLAIN (COSTS OFF) INSERT INTO test_1702(id, time) VALUES('1', current_timestamp); + QUERY PLAN 
+----------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable test_1702 + -> Insert on test_1702 + -> Custom Scan (DataNodeDispatch) + Batch size: 1000 + -> Custom Scan (ChunkDispatch) + -> Result +(7 rows) + +DROP TABLE test_1702; +-- +-- Test that creating a hypertable with a space dimension and +-- if_not_exists works as expected, that is, the second call does not +-- generate an error (and does not crash). +-- +CREATE TABLE whatever ( + timestamp TIMESTAMPTZ NOT NULL, + user_id INT NOT NULL, + data JSONB +); +SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', + if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 24 | public | whatever | t +(1 row) + +-- Check the hypertable sequence before and after call to ensure that +-- the hypertable sequence was not increased with the second call. +SELECT last_value FROM _timescaledb_catalog.hypertable_id_seq; + last_value +------------ + 24 +(1 row) + +SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', + if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); +NOTICE: table "whatever" is already a hypertable, skipping + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 24 | public | whatever | f +(1 row) + +SELECT last_value FROM _timescaledb_catalog.hypertable_id_seq; + last_value +------------ + 24 +(1 row) + +-- Test that creating a distributed hypertable from a table with data +-- fails, and that migrate_data blocked. +CREATE TABLE dist_hypertable_1 ( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temp FLOAT +); +INSERT INTO dist_hypertable_1 VALUES + ('2017-01-01 06:01', 1), + ('2017-01-01 09:11', 3), + ('2017-01-01 08:01', 1), + ('2017-01-02 08:01', 2), + ('2018-07-02 08:01', 87); +\set ON_ERROR_STOP 0 +SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, + migrate_data => FALSE); +ERROR: table "dist_hypertable_1" is not empty +SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, + migrate_data => TRUE); +ERROR: cannot migrate data for distributed hypertable +\set ON_ERROR_STOP 1 +-- Test creating index with transaction per chunk on a distributed hypertable +-- +DROP TABLE disttable; +CREATE TABLE disttable( + time timestamptz NOT NULL, + device int, + value float +); +SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 25 | public | disttable | t +(1 row) + +INSERT INTO disttable VALUES + ('2017-01-01 06:01', 1, 1.2), + ('2017-01-01 09:11', 3, 4.3), + ('2017-01-01 08:01', 1, 7.3), + ('2017-01-02 08:01', 2, 0.23), + ('2018-07-02 08:01', 87, 0.0), + ('2018-07-01 06:01', 13, 3.1), + ('2018-07-01 09:11', 90, 10303.12), + ('2018-07-01 08:01', 29, 64); +\set ON_ERROR_STOP 0 +CREATE INDEX disttable_time_device_idx ON disttable (time, device) WITH (timescaledb.transaction_per_chunk); +ERROR: cannot use timescaledb.transaction_per_chunk with distributed hypertable +\set ON_ERROR_STOP 1 +-- Test using system columns with distributed hypertable +-- +CREATE TABLE dist_syscol(time timestamptz NOT NULL, color int, temp float); +SELECT * FROM create_distributed_hypertable('dist_syscol', 'time', 'color'); + hypertable_id | schema_name | table_name | created 
+---------------+-------------+-------------+--------- + 26 | public | dist_syscol | t +(1 row) + +INSERT INTO dist_syscol VALUES + ('2017-02-01 06:01', 1, 1.1), + ('2017-02-01 08:01', 1, 1.2), + ('2018-02-02 08:01', 2, 1.3), + ('2019-02-01 09:11', 3, 2.1), + ('2019-02-02 09:11', 3, 2.1), + ('2019-02-02 10:01', 5, 1.2), + ('2019-02-03 11:11', 6, 3.5), + ('2019-02-04 08:21', 4, 6.6), + ('2019-02-04 10:11', 7, 7.4), + ('2019-02-04 12:11', 8, 2.1), + ('2019-02-05 13:31', 8, 6.3), + ('2019-02-06 02:11', 5, 1.8), + ('2019-02-06 01:13', 7, 7.9), + ('2019-02-06 19:24', 9, 5.9), + ('2019-02-07 18:44', 5, 9.7), + ('2019-02-07 20:24', 6, NULL), + ('2019-02-07 09:33', 7, 9.5), + ('2019-02-08 08:54', 1, 7.3), + ('2019-02-08 18:14', 4, 8.2), + ('2019-02-09 19:23', 8, 9.1); +-- Return chunk table as a source +SET timescaledb.enable_per_data_node_queries = false; +SELECT tableoid::regclass, * FROM dist_syscol; + tableoid | time | color | temp +-----------------------------------------------+------------------------------+-------+------ + _timescaledb_internal._dist_hyper_26_72_chunk | Wed Feb 01 06:01:00 2017 PST | 1 | 1.1 + _timescaledb_internal._dist_hyper_26_72_chunk | Wed Feb 01 08:01:00 2017 PST | 1 | 1.2 + _timescaledb_internal._dist_hyper_26_73_chunk | Fri Feb 02 08:01:00 2018 PST | 2 | 1.3 + _timescaledb_internal._dist_hyper_26_74_chunk | Fri Feb 01 09:11:00 2019 PST | 3 | 2.1 + _timescaledb_internal._dist_hyper_26_74_chunk | Sat Feb 02 09:11:00 2019 PST | 3 | 2.1 + _timescaledb_internal._dist_hyper_26_75_chunk | Sat Feb 02 10:01:00 2019 PST | 5 | 1.2 + _timescaledb_internal._dist_hyper_26_75_chunk | Mon Feb 04 08:21:00 2019 PST | 4 | 6.6 + _timescaledb_internal._dist_hyper_26_75_chunk | Mon Feb 04 10:11:00 2019 PST | 7 | 7.4 + _timescaledb_internal._dist_hyper_26_75_chunk | Wed Feb 06 02:11:00 2019 PST | 5 | 1.8 + _timescaledb_internal._dist_hyper_26_75_chunk | Wed Feb 06 01:13:00 2019 PST | 7 | 7.9 + _timescaledb_internal._dist_hyper_26_76_chunk | Sun Feb 03 11:11:00 2019 PST | 6 | 3.5 + _timescaledb_internal._dist_hyper_26_76_chunk | Mon Feb 04 12:11:00 2019 PST | 8 | 2.1 + _timescaledb_internal._dist_hyper_26_76_chunk | Tue Feb 05 13:31:00 2019 PST | 8 | 6.3 + _timescaledb_internal._dist_hyper_26_77_chunk | Wed Feb 06 19:24:00 2019 PST | 9 | 5.9 + _timescaledb_internal._dist_hyper_26_78_chunk | Thu Feb 07 18:44:00 2019 PST | 5 | 9.7 + _timescaledb_internal._dist_hyper_26_78_chunk | Thu Feb 07 09:33:00 2019 PST | 7 | 9.5 + _timescaledb_internal._dist_hyper_26_78_chunk | Fri Feb 08 18:14:00 2019 PST | 4 | 8.2 + _timescaledb_internal._dist_hyper_26_79_chunk | Thu Feb 07 20:24:00 2019 PST | 6 | + _timescaledb_internal._dist_hyper_26_79_chunk | Fri Feb 08 08:54:00 2019 PST | 1 | 7.3 + _timescaledb_internal._dist_hyper_26_79_chunk | Sat Feb 09 19:23:00 2019 PST | 8 | 9.1 +(20 rows) + +-- Produce an error +SET timescaledb.enable_per_data_node_queries = true; +\set ON_ERROR_STOP 0 +SELECT tableoid::regclass, * FROM dist_syscol; +ERROR: system columns are not accessible on distributed hypertables with current settings +\set ON_ERROR_STOP 1 +----------------------- +-- Test DataNodeCopy -- +----------------------- +SET timescaledb.enable_distributed_insert_with_copy=true; +DROP TABLE disttable; +-- Add serial (autoincrement) and DEFAULT value columns to test that +-- these work with DataNodeCopy +CREATE TABLE disttable( + id serial, + time timestamptz NOT NULL, + device int DEFAULT 100, + temp_c float +); +SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); + hypertable_id | 
schema_name | table_name | created +---------------+-------------+------------+--------- + 27 | public | disttable | t +(1 row) + +-- Create a datatable to source data from. Add array of composite data +-- type to test switching to text mode below. Arrays include the type +-- Oid when serialized in binary format. Since the Oid of a +-- user-created type can differ across data nodes, such serialization +-- is not safe. +CREATE TABLE datatable (LIKE disttable); +CREATE TYPE highlow AS (high int, low int); +CALL distributed_exec($$ CREATE TYPE highlow AS (high int, low int) $$); +ALTER TABLE datatable ADD COLUMN minmaxes highlow[]; +INSERT INTO datatable (id, time, device, temp_c, minmaxes) VALUES + (1, '2017-01-01 06:01', 1, 1.2, ARRAY[(1,2)::highlow]), + (2, '2017-01-01 09:11', 3, 4.3, ARRAY[(2,3)::highlow]), + (3, '2017-01-01 08:01', 1, 7.3, ARRAY[(4,5)::highlow]), + (4, '2017-01-02 08:01', 2, 0.23, ARRAY[(6,7)::highlow]), + (5, '2018-07-02 08:01', 87, 0.0, ARRAY[(8,9)::highlow]), + (6, '2018-07-01 06:01', 13, 3.1, ARRAY[(10,11)::highlow]), + (7, '2018-07-01 09:11', 90, 10303.12, ARRAY[(12,13)::highlow]), + (8, '2018-07-01 08:01', 29, 64, ARRAY[(14,15)::highlow]); +-- Show that DataNodeCopy is used instead of DataNodeDispatch. Should +-- default to FORMAT binary in the remote SQL. Add RETURNING to show +-- that it works. +EXPLAIN VERBOSE +INSERT INTO disttable (time, device, temp_c) +SELECT time, device, temp_c FROM datatable +RETURNING *; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (cost=0.00..24.55 rows=970 width=24) + Output: disttable.id, disttable."time", disttable.device, disttable.temp_c + Insert on distributed hypertable public.disttable + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + -> Insert on public.disttable (cost=0.00..24.55 rows=970 width=24) + Output: disttable.id, disttable."time", disttable.device, disttable.temp_c + -> Custom Scan (DataNodeCopy) (cost=0.00..24.55 rows=970 width=24) + Output: ((nextval('disttable_id_seq'::regclass))::integer), datatable."time", datatable.device, datatable.temp_c + Remote SQL: COPY public.disttable (id, "time", device, temp_c) FROM STDIN WITH (FORMAT binary) + -> Custom Scan (ChunkDispatch) (cost=0.00..24.55 rows=970 width=24) + Output: ((nextval('disttable_id_seq'::regclass))::integer), datatable."time", datatable.device, datatable.temp_c + -> Seq Scan on public.datatable (cost=0.00..24.55 rows=970 width=24) + Output: nextval('disttable_id_seq'::regclass), datatable."time", datatable.device, datatable.temp_c +(13 rows) + +-- Perform the actual insert +INSERT INTO disttable (time, device, temp_c) +SELECT time, device, temp_c FROM datatable +RETURNING *; + id | time | device | temp_c +----+------------------------------+--------+---------- + 1 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 + 2 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 + 3 | Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 + 4 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 + 5 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 + 6 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 + 7 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 + 8 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 +(8 rows) + +-- Show that the data was added: +SELECT * FROM disttable ORDER BY 1; + id | time | device | temp_c +----+------------------------------+--------+---------- + 1 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 + 2 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 + 3 | Sun Jan 01 
08:01:00 2017 PST | 1 | 7.3 + 4 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 + 5 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 + 6 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 + 7 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 + 8 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 +(8 rows) + +SELECT count(*) FROM disttable; + count +------- + 8 +(1 row) + +-- Add an array of a composite type to check that DataNodeCopy +-- switches to text format if we use a table with an array of a custom +-- type. There should be no "FORMAT binary" in the remote explain. +ALTER TABLE disttable ADD COLUMN minmaxes highlow[]; +EXPLAIN VERBOSE +INSERT INTO disttable (time, device, temp_c, minmaxes) +SELECT time, device, temp_c, minmaxes FROM datatable; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (cost=0.00..24.55 rows=970 width=56) + Insert on distributed hypertable public.disttable + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + -> Insert on public.disttable (cost=0.00..24.55 rows=970 width=56) + -> Custom Scan (DataNodeCopy) (cost=0.00..24.55 rows=970 width=56) + Output: ((nextval('disttable_id_seq'::regclass))::integer), datatable."time", datatable.device, datatable.temp_c, datatable.minmaxes + Remote SQL: COPY public.disttable (id, "time", device, temp_c, minmaxes) FROM STDIN + -> Custom Scan (ChunkDispatch) (cost=0.00..24.55 rows=970 width=56) + Output: ((nextval('disttable_id_seq'::regclass))::integer), datatable."time", datatable.device, datatable.temp_c, datatable.minmaxes + -> Seq Scan on public.datatable (cost=0.00..24.55 rows=970 width=56) + Output: nextval('disttable_id_seq'::regclass), datatable."time", datatable.device, datatable.temp_c, datatable.minmaxes +(11 rows) + +INSERT INTO disttable (time, device, temp_c, minmaxes) +SELECT time, device, temp_c, minmaxes FROM datatable; +-- Should have double amount of rows compared to before and half of +-- them values in the new column. Note, must use TEXT format on the +-- connection to make query work with custom type array. +SET timescaledb.enable_connection_binary_data=false; +SELECT * FROM disttable ORDER BY 1; + id | time | device | temp_c | minmaxes +----+------------------------------+--------+----------+------------- + 1 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 | + 2 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 | + 3 | Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 | + 4 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 | + 5 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 | + 6 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 | + 7 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 | + 8 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 | + 9 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 | {"(1,2)"} + 10 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 | {"(2,3)"} + 11 | Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 | {"(4,5)"} + 12 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 | {"(6,7)"} + 13 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 | {"(8,9)"} + 14 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 | {"(10,11)"} + 15 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 | {"(12,13)"} + 16 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 | {"(14,15)"} +(16 rows) + +SELECT count(*) FROM disttable; + count +------- + 16 +(1 row) + +-- Binary format should lead to data incompatibility in PG 13 and earlier, +-- because the highlow data type has different oids on data and access nodes. +-- Use this to test the deserialization error reporting. 
Newer PG version +-- ignore this oid mismatch for non-builtin types. +SET timescaledb.enable_connection_binary_data=true; +\set ON_ERROR_STOP 0 +SET timescaledb.remote_data_fetcher = 'copy'; +SELECT * FROM disttable ORDER BY 1; + id | time | device | temp_c | minmaxes +----+------------------------------+--------+----------+------------- + 1 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 | + 2 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 | + 3 | Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 | + 4 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 | + 5 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 | + 6 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 | + 7 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 | + 8 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 | + 9 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 | {"(1,2)"} + 10 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 | {"(2,3)"} + 11 | Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 | {"(4,5)"} + 12 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 | {"(6,7)"} + 13 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 | {"(8,9)"} + 14 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 | {"(10,11)"} + 15 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 | {"(12,13)"} + 16 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 | {"(14,15)"} +(16 rows) + +SET timescaledb.remote_data_fetcher = 'cursor'; +SELECT * FROM disttable ORDER BY 1; + id | time | device | temp_c | minmaxes +----+------------------------------+--------+----------+------------- + 1 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 | + 2 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 | + 3 | Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 | + 4 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 | + 5 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 | + 6 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 | + 7 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 | + 8 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 | + 9 | Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 | {"(1,2)"} + 10 | Sun Jan 01 09:11:00 2017 PST | 3 | 4.3 | {"(2,3)"} + 11 | Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 | {"(4,5)"} + 12 | Mon Jan 02 08:01:00 2017 PST | 2 | 0.23 | {"(6,7)"} + 13 | Mon Jul 02 08:01:00 2018 PDT | 87 | 0 | {"(8,9)"} + 14 | Sun Jul 01 06:01:00 2018 PDT | 13 | 3.1 | {"(10,11)"} + 15 | Sun Jul 01 09:11:00 2018 PDT | 90 | 10303.12 | {"(12,13)"} + 16 | Sun Jul 01 08:01:00 2018 PDT | 29 | 64 | {"(14,15)"} +(16 rows) + +\set ON_ERROR_STOP 1 +RESET timescaledb.remote_data_fetcher; +-- Show that DataNodeCopy is NOT used when source hypertable and target hypertable +-- of the SELECT are both distributed. 
Try subselects with LIMIT, RETURNING and +-- different distributed hypertable as source +EXPLAIN (COSTS OFF) +INSERT INTO disttable (time, device, temp_c) +SELECT time, device, temp_c FROM disttable; + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable disttable + -> Insert on disttable + -> Custom Scan (DataNodeDispatch) + Batch size: 1000 + -> Custom Scan (ChunkDispatch) + -> Append + -> Result + -> Custom Scan (DataNodeScan) on disttable disttable_2 + -> Result + -> Custom Scan (DataNodeScan) on disttable disttable_3 + -> Result + -> Custom Scan (DataNodeScan) on disttable disttable_4 +(13 rows) + +EXPLAIN (COSTS OFF) +INSERT INTO disttable (time, device, temp_c) +SELECT time, device, temp_c FROM disttable LIMIT 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable disttable + -> Insert on disttable + -> Custom Scan (DataNodeDispatch) + Batch size: 1000 + -> Custom Scan (ChunkDispatch) + -> Subquery Scan on "*SELECT*" + -> Limit + -> Custom Scan (AsyncAppend) + -> Append + -> Custom Scan (DataNodeScan) on disttable disttable_2 + -> Custom Scan (DataNodeScan) on disttable disttable_3 + -> Custom Scan (DataNodeScan) on disttable disttable_4 +(13 rows) + +EXPLAIN (COSTS OFF) +INSERT INTO disttable (time, device, temp_c) +SELECT time, device, temp_c FROM disttable RETURNING *; + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Insert on distributed hypertable disttable + -> Insert on disttable + -> Custom Scan (DataNodeDispatch) + Batch size: 1000 + -> Custom Scan (ChunkDispatch) + -> Append + -> Result + -> Custom Scan (DataNodeScan) on disttable disttable_2 + -> Result + -> Custom Scan (DataNodeScan) on disttable disttable_3 + -> Result + -> Custom Scan (DataNodeScan) on disttable disttable_4 +(13 rows) + +INSERT INTO disttable (time, device, temp_c) +SELECT time, device, temp_c FROM disttable; +INSERT INTO disttable (time, device, temp_c) +SELECT * FROM hyper_estimate LIMIT 2; +SELECT count(*) FROM disttable; + count +------- + 34 +(1 row) + +-- REMOVE a column on data nodes to check how errors are handled: +CALL distributed_exec($$ ALTER TABLE disttable DROP COLUMN minmaxes $$); +\set ON_ERROR_STOP 0 +INSERT INTO disttable SELECT * FROM datatable; +ERROR: [db_dist_hypertable_1]: column "minmaxes" of relation "disttable" does not exist +\set ON_ERROR_STOP 1 +DROP TABLE disttable; +-- Create a new table access method by reusing heap handler +CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ +CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; +$$); +NOTICE: [db_dist_hypertable_1]: +CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler +NOTICE: [db_dist_hypertable_2]: +CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler +NOTICE: [db_dist_hypertable_3]: +CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler + remote_exec +------------- + +(1 row) + +-- Create distributed hypertable using non-default access method +CREATE TABLE disttable(time timestamptz NOT NULL, device int, temp_c float, temp_f float GENERATED ALWAYS AS (temp_c * 9 / 5 + 32) STORED) USING test_am; +SELECT * FROM 
create_distributed_hypertable('disttable', 'time', 'device', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 28 | public | disttable | t +(1 row) + +-- Make sure that distributed hypertable created on data nodes is +-- using the correct table access method +SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ + +SELECT amname AS hypertable_amname +FROM pg_class cl, pg_am am +WHERE cl.oid = 'disttable'::regclass +AND cl.relam = am.oid; +$$); +NOTICE: [db_dist_hypertable_1]: + +SELECT amname AS hypertable_amname +FROM pg_class cl, pg_am am +WHERE cl.oid = 'disttable'::regclass +AND cl.relam = am.oid +NOTICE: [db_dist_hypertable_1]: +hypertable_amname +----------------- +test_am +(1 row) + + +NOTICE: [db_dist_hypertable_2]: + +SELECT amname AS hypertable_amname +FROM pg_class cl, pg_am am +WHERE cl.oid = 'disttable'::regclass +AND cl.relam = am.oid +NOTICE: [db_dist_hypertable_2]: +hypertable_amname +----------------- +test_am +(1 row) + + +NOTICE: [db_dist_hypertable_3]: + +SELECT amname AS hypertable_amname +FROM pg_class cl, pg_am am +WHERE cl.oid = 'disttable'::regclass +AND cl.relam = am.oid +NOTICE: [db_dist_hypertable_3]: +hypertable_amname +----------------- +test_am +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Check that basic operations are working as expected +INSERT INTO disttable VALUES + ('2017-01-01 06:01', 1, -10.0), + ('2017-01-01 09:11', 3, -5.0), + ('2017-01-01 08:01', 1, 1.0), + ('2017-01-02 08:01', 2, 5.0), + ('2018-07-02 08:01', 87, 10.0), + ('2018-07-01 06:01', 13, 15.0), + ('2018-07-01 09:11', 90, 20.0), + ('2018-07-01 08:01', 29, 24.0); +SELECT * FROM disttable ORDER BY time; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Sun Jan 01 06:01:00 2017 PST | 1 | -10 | 14 + Sun Jan 01 08:01:00 2017 PST | 1 | 1 | 33.8 + Sun Jan 01 09:11:00 2017 PST | 3 | -5 | 23 + Mon Jan 02 08:01:00 2017 PST | 2 | 5 | 41 + Sun Jul 01 06:01:00 2018 PDT | 13 | 15 | 59 + Sun Jul 01 08:01:00 2018 PDT | 29 | 24 | 75.2 + Sun Jul 01 09:11:00 2018 PDT | 90 | 20 | 68 + Mon Jul 02 08:01:00 2018 PDT | 87 | 10 | 50 +(8 rows) + +-- Show that GENERATED columns work for INSERT with RETURNING clause +-- (should use DataNodeCopy) +TRUNCATE disttable; +EXPLAIN VERBOSE +INSERT INTO disttable VALUES ('2017-08-01 06:01', 1, 35.0) RETURNING *; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (cost=0.00..0.01 rows=1 width=28) + Output: disttable."time", disttable.device, disttable.temp_c, disttable.temp_f + Insert on distributed hypertable public.disttable + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + -> Insert on public.disttable (cost=0.00..0.01 rows=1 width=28) + Output: disttable."time", disttable.device, disttable.temp_c, disttable.temp_f + -> Custom Scan (DataNodeCopy) (cost=0.00..0.01 rows=1 width=28) + Output: 'Tue Aug 01 06:01:00 2017 PDT'::timestamp with time zone, 1, '35'::double precision, NULL::double precision + Remote SQL: COPY public.disttable ("time", device, temp_c) FROM STDIN WITH (FORMAT binary) + -> Custom Scan (ChunkDispatch) (cost=0.00..0.01 rows=1 width=28) + Output: 'Tue Aug 01 06:01:00 2017 PDT'::timestamp with time zone, 1, '35'::double precision, NULL::double precision + -> Result (cost=0.00..0.01 rows=1 width=28) + Output: 'Tue Aug 01 06:01:00 2017 
PDT'::timestamp with time zone, 1, '35'::double precision, NULL::double precision +(13 rows) + +INSERT INTO disttable VALUES ('2017-08-01 06:01', 1, 35.0) RETURNING *; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Tue Aug 01 06:01:00 2017 PDT | 1 | 35 | 95 +(1 row) + +-- Same values returned with SELECT: +SELECT * FROM disttable ORDER BY 1; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Tue Aug 01 06:01:00 2017 PDT | 1 | 35 | 95 +(1 row) + +UPDATE disttable SET temp_c=40.0 WHERE device=1; +SELECT * FROM disttable ORDER BY 1; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Tue Aug 01 06:01:00 2017 PDT | 1 | 40 | 104 +(1 row) + +-- Insert another value +INSERT INTO disttable VALUES ('2017-09-01 06:01', 2, 30.0); +SELECT * FROM disttable ORDER BY 1; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Tue Aug 01 06:01:00 2017 PDT | 1 | 40 | 104 + Fri Sep 01 06:01:00 2017 PDT | 2 | 30 | 86 +(2 rows) + +-- Delete a value based on the generated column +DELETE FROM disttable WHERE temp_f=104; +SELECT * FROM disttable ORDER BY 1; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Fri Sep 01 06:01:00 2017 PDT | 2 | 30 | 86 +(1 row) + +-- Test also with DataNodeDispatch +TRUNCATE disttable; +SET timescaledb.enable_distributed_insert_with_copy=false; +EXPLAIN VERBOSE +INSERT INTO disttable VALUES ('2017-09-01 06:01', 5, 40.0) RETURNING *; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (cost=0.00..0.01 rows=1 width=28) + Output: disttable."time", disttable.device, disttable.temp_c, disttable.temp_f + Insert on distributed hypertable public.disttable + Data nodes: db_dist_hypertable_1, db_dist_hypertable_2, db_dist_hypertable_3 + -> Insert on public.disttable (cost=0.00..0.01 rows=1 width=28) + Output: disttable."time", disttable.device, disttable.temp_c, disttable.temp_f + -> Custom Scan (DataNodeDispatch) (cost=0.00..0.01 rows=1 width=28) + Output: 'Fri Sep 01 06:01:00 2017 PDT'::timestamp with time zone, 5, '40'::double precision, NULL::double precision + Batch size: 1000 + Remote SQL: INSERT INTO public.disttable("time", device, temp_c) VALUES ($1, $2, $3), ..., ($2998, $2999, $3000) RETURNING "time", device, temp_c, temp_f + -> Custom Scan (ChunkDispatch) (cost=0.00..0.01 rows=1 width=28) + Output: 'Fri Sep 01 06:01:00 2017 PDT'::timestamp with time zone, 5, '40'::double precision, NULL::double precision + -> Result (cost=0.00..0.01 rows=1 width=28) + Output: 'Fri Sep 01 06:01:00 2017 PDT'::timestamp with time zone, 5, '40'::double precision, NULL::double precision +(14 rows) + +INSERT INTO disttable VALUES ('2017-09-01 06:01', 5, 40.0) RETURNING *; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Fri Sep 01 06:01:00 2017 PDT | 5 | 40 | 104 +(1 row) + +-- Generated columns with SELECT +SELECT * FROM disttable ORDER BY 1; + time | device | temp_c | temp_f +------------------------------+--------+--------+-------- + Fri Sep 01 06:01:00 2017 PDT | 5 | 40 | 104 +(1 row) + +-- Check distributed hypertable within procedure properly drops remote tables +-- +-- #3663 +-- +CREATE TABLE test (time timestamp, v int); +SELECT create_distributed_hypertable('test','time'); 
+WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (29,public,test,t) +(1 row) + +CREATE PROCEDURE test_drop() LANGUAGE PLPGSQL AS $$ +BEGIN + DROP TABLE test; +END +$$; +CALL test_drop(); +CREATE TABLE test (time timestamp, v int); +SELECT create_distributed_hypertable('test','time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (30,public,test,t) +(1 row) + +DROP TABLE test; +-- Test that stable functions are calculated on the access node. +-- +-- As a stable function to test, use the timestamp -> timestamptz conversion +-- that is stable because it uses the current timezone. +-- We have to be careful about `timestamp < timestamptz` comparison. From postgres +-- docs: +-- When comparing a timestamp without time zone to a timestamp with time zone, +-- the former value is assumed to be given in the time zone specified by the +-- TimeZone configuration parameter, and is rotated to UTC for comparison to +-- the latter value (which is already in UTC internally). +-- We don't want this to happen on data node, so we cast the filter value to +-- timestamp, and check that this cast happens on the access node and uses the +-- current timezone. +SELECT test.tsl_override_current_timestamptz(null); + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + +CREATE TABLE test_tz (time timestamp, v int); +SELECT create_distributed_hypertable('test_tz','time', + chunk_time_interval => interval '1 hour'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (31,public,test_tz,t) +(1 row) + +INSERT INTO test_tz VALUES ('2018-01-02 12:00:00', 2), ('2018-01-02 11:00:00', 1), + ('2018-01-02 13:00:00', 3), ('2018-01-02 14:00:00', 4); +SET TIME ZONE 'Etc/GMT'; +SELECT '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + timestamp +-------------------------- + Tue Jan 02 12:00:00 2018 +(1 row) + +-- Normal WHERE clause on baserel +SELECT * FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + time | v +--------------------------+--- + Tue Jan 02 13:00:00 2018 | 3 + Tue Jan 02 14:00:00 2018 | 4 +(2 rows) + +EXPLAIN (verbose, costs off) +SELECT * FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: test_tz."time", test_tz.v + -> Append + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_1 + Output: test_tz_1."time", test_tz_1.v + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_31_97_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Index Scan using _dist_hyper_31_97_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_97_chunk + Output: _dist_hyper_31_97_chunk."time", _dist_hyper_31_97_chunk.v + Index 
Cond: (_dist_hyper_31_97_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_2 + Output: test_tz_2."time", test_tz_2.v + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Append + -> Index Scan using _dist_hyper_31_95_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_95_chunk + Output: _dist_hyper_31_95_chunk."time", _dist_hyper_31_95_chunk.v + Index Cond: (_dist_hyper_31_95_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + -> Index Scan using _dist_hyper_31_98_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_98_chunk + Output: _dist_hyper_31_98_chunk."time", _dist_hyper_31_98_chunk.v + Index Cond: (_dist_hyper_31_98_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + +(27 rows) + +-- Also test different code paths used with aggregation pushdown. +SELECT count(*) FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + count +------- + 2 +(1 row) + +EXPLAIN (verbose, costs off) +SELECT count(*) FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Custom Scan (AsyncAppend) + -> Append + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_1 + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_31_97_chunk + Remote SQL: SELECT NULL FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Result + Output: NULL::text + -> Index Only Scan using _dist_hyper_31_97_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_97_chunk + Index Cond: (_dist_hyper_31_97_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_2 + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk + Remote SQL: SELECT NULL FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Result + Output: NULL::text + -> Append + -> Index Only Scan using _dist_hyper_31_95_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_95_chunk + Index Cond: (_dist_hyper_31_95_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + -> Index Only Scan using _dist_hyper_31_98_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_98_chunk + Index Cond: (_dist_hyper_31_98_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + +(27 rows) + +-- TODO: test HAVING here and in the later now() tests as well. +-- Change the timezone and check that the conversion is applied correctly. 
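+-- Note: 'Etc/GMT+1' follows the POSIX sign convention, i.e. it is one hour
+-- behind UTC, so 12:00 UTC renders as 11:00 local time. The stable cast is
+-- constified on the access node into "time" > '2018-01-02 11:00:00' before
+-- being shipped to the data nodes, so the 12:00 row also qualifies below.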
+SET TIME ZONE 'Etc/GMT+1'; +SELECT '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + timestamp +-------------------------- + Tue Jan 02 11:00:00 2018 +(1 row) + +SELECT * FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + time | v +--------------------------+--- + Tue Jan 02 13:00:00 2018 | 3 + Tue Jan 02 12:00:00 2018 | 2 + Tue Jan 02 14:00:00 2018 | 4 +(3 rows) + +EXPLAIN (verbose, costs off) +SELECT * FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: test_tz."time", test_tz.v + -> Append + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_1 + Output: test_tz_1."time", test_tz_1.v + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_31_97_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[45]) AND (("time" > '2018-01-02 11:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Index Scan using _dist_hyper_31_97_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_97_chunk + Output: _dist_hyper_31_97_chunk."time", _dist_hyper_31_97_chunk.v + Index Cond: (_dist_hyper_31_97_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_2 + Output: test_tz_2."time", test_tz_2.v + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time" > '2018-01-02 11:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Append + -> Index Scan using _dist_hyper_31_95_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_95_chunk + Output: _dist_hyper_31_95_chunk."time", _dist_hyper_31_95_chunk.v + Index Cond: (_dist_hyper_31_95_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + -> Index Scan using _dist_hyper_31_96_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_96_chunk + Output: _dist_hyper_31_96_chunk."time", _dist_hyper_31_96_chunk.v + Index Cond: (_dist_hyper_31_96_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + -> Index Scan using _dist_hyper_31_98_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_98_chunk + Output: _dist_hyper_31_98_chunk."time", _dist_hyper_31_98_chunk.v + Index Cond: (_dist_hyper_31_98_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + +(30 rows) + +SELECT count(*) FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + count +------- + 3 +(1 row) + +EXPLAIN (verbose, costs off) +SELECT count(*) FROM test_tz WHERE time > '2018-01-02 12:00:00 +00'::timestamptz::timestamp; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Custom Scan (AsyncAppend) + -> Append + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_1 + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_31_97_chunk + Remote SQL: SELECT NULL FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[45]) 
AND (("time" > '2018-01-02 11:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Result + Output: NULL::text + -> Index Only Scan using _dist_hyper_31_97_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_97_chunk + Index Cond: (_dist_hyper_31_97_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_2 + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk + Remote SQL: SELECT NULL FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time" > '2018-01-02 11:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Result + Output: NULL::text + -> Append + -> Index Only Scan using _dist_hyper_31_95_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_95_chunk + Index Cond: (_dist_hyper_31_95_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + -> Index Only Scan using _dist_hyper_31_96_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_96_chunk + Index Cond: (_dist_hyper_31_96_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + -> Index Only Scan using _dist_hyper_31_98_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_98_chunk + Index Cond: (_dist_hyper_31_98_chunk."time" > '2018-01-02 11:00:00'::timestamp without time zone) + +(29 rows) + +-- Conversion to timestamptz cannot be evaluated at the access node, because the +-- argument is a column reference. +SELECT count(*) FROM test_tz WHERE time::timestamptz > now(); + count +------- + 0 +(1 row) + +-- According to our docs, JIT is not recommended for use on access node in +-- multi-node environment. Turn it off so that it doesn't ruin EXPLAIN for the +-- next query. +SET jit = 0; +-- Test that operators are evaluated as well. Comparison of timestamp with +-- timestamptz is a stable operator, and comparison of two timestamps is an +-- immutable operator. This also test that immutable functions using these +-- operators as arguments are evaluated. 
+EXPLAIN (verbose, costs off) +WITH dummy AS (SELECT '2018-01-02 12:00:00 +00'::timestamptz::timestamp x) +SELECT * FROM test_tz, dummy +WHERE time > x + + (x = x)::int -- stable + * (x = '2018-01-02 11:00:00'::timestamp)::int -- immutable + * INTERVAL '1 hour'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: test_tz."time", test_tz.v, (('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone) + -> Append + -> Result + Output: test_tz_1."time", test_tz_1.v, ('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_1 + Output: test_tz_1."time", test_tz_1.v + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_31_97_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Index Scan using _dist_hyper_31_97_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_97_chunk + Output: _dist_hyper_31_97_chunk."time", _dist_hyper_31_97_chunk.v + Index Cond: (_dist_hyper_31_97_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + + -> Result + Output: test_tz_2."time", test_tz_2.v, ('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_2 + Output: test_tz_2."time", test_tz_2.v + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) + Remote EXPLAIN: + Append + -> Index Scan using _dist_hyper_31_95_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_95_chunk + Output: _dist_hyper_31_95_chunk."time", _dist_hyper_31_95_chunk.v + Index Cond: (_dist_hyper_31_95_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + -> Index Scan using _dist_hyper_31_98_chunk_test_tz_time_idx on _timescaledb_internal._dist_hyper_31_98_chunk + Output: _dist_hyper_31_98_chunk."time", _dist_hyper_31_98_chunk.v + Index Cond: (_dist_hyper_31_98_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) + +(31 rows) + +-- Reference value for the above test. +WITH dummy AS (SELECT '2018-01-02 12:00:00 +00'::timestamptz::timestamp x) +SELECT x + (x = x)::int * (x = '2018-01-02 11:00:00'::timestamp)::int * INTERVAL '1 hour' +FROM dummy; + ?column? +-------------------------- + Tue Jan 02 12:00:00 2018 +(1 row) + +-- Exercise some more stable timestamp-related functions. 
+EXPLAIN (COSTS OFF, VERBOSE) +SELECT * FROM test_tz WHERE date_trunc('month', time) > date_in('2021-01-01') + AND time::time > '00:00:00'::time + + (INTERVAL '1 hour') * date_part('hour', INTERVAL '1 hour'); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: test_tz."time", test_tz.v + -> Append + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_1 + Output: test_tz_1."time", test_tz_1.v + Data node: db_dist_hypertable_1 + Chunks: _dist_hyper_31_97_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[45]) AND (("time"::time without time zone > '01:00:00'::time without time zone)) AND ((date_trunc('month'::text, "time") > '2021-01-01'::date)) + Remote EXPLAIN: + Seq Scan on _timescaledb_internal._dist_hyper_31_97_chunk + Output: _dist_hyper_31_97_chunk."time", _dist_hyper_31_97_chunk.v + Filter: (((_dist_hyper_31_97_chunk."time")::time without time zone > '01:00:00'::time without time zone) AND (date_trunc('month'::text, _dist_hyper_31_97_chunk."time") > '2021-01-01'::date)) + + -> Custom Scan (DataNodeScan) on public.test_tz test_tz_2 + Output: test_tz_2."time", test_tz_2.v + Data node: db_dist_hypertable_2 + Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk + Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_functions.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time"::time without time zone > '01:00:00'::time without time zone)) AND ((date_trunc('month'::text, "time") > '2021-01-01'::date)) + Remote EXPLAIN: + Append + -> Seq Scan on _timescaledb_internal._dist_hyper_31_95_chunk + Output: _dist_hyper_31_95_chunk."time", _dist_hyper_31_95_chunk.v + Filter: (((_dist_hyper_31_95_chunk."time")::time without time zone > '01:00:00'::time without time zone) AND (date_trunc('month'::text, _dist_hyper_31_95_chunk."time") > '2021-01-01'::date)) + -> Seq Scan on _timescaledb_internal._dist_hyper_31_96_chunk + Output: _dist_hyper_31_96_chunk."time", _dist_hyper_31_96_chunk.v + Filter: (((_dist_hyper_31_96_chunk."time")::time without time zone > '01:00:00'::time without time zone) AND (date_trunc('month'::text, _dist_hyper_31_96_chunk."time") > '2021-01-01'::date)) + -> Seq Scan on _timescaledb_internal._dist_hyper_31_98_chunk + Output: _dist_hyper_31_98_chunk."time", _dist_hyper_31_98_chunk.v + Filter: (((_dist_hyper_31_98_chunk."time")::time without time zone > '01:00:00'::time without time zone) AND (date_trunc('month'::text, _dist_hyper_31_98_chunk."time") > '2021-01-01'::date)) + +(30 rows) + +-- Check that the test function for partly overriding now() works. It's very +-- hacky and only has effect when we estimate some costs or evaluate sTABLE +-- functions in quals on access node, and has no effect in other cases. +-- Consider deleting it altogether. 
+SELECT test.tsl_override_current_timestamptz('2018-01-02 12:00:00 +00'::timestamptz); + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + +SELECT count(*) FROM test_tz WHERE time > now(); + count +------- + 3 +(1 row) + +SELECT test.tsl_override_current_timestamptz(null); + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + +RESET TIME ZONE; +DROP TABLE test_tz; +-- Check that now() is evaluated on the access node. Also check that it is evaluated +-- anew on every execution of a prepared statement. +CREATE TABLE test_now (time timestamp, v int); +SELECT create_distributed_hypertable('test_now','time', + chunk_time_interval => interval '1 hour'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (32,public,test_now,t) +(1 row) + +PREPARE test_query as +SELECT count(*) FILTER (WHERE time = now()), count(*) FILTER (WHERE time < now()), + count(*) FILTER (WHERE time > now()) FROM test_now; +; +BEGIN; -- to fix the value of now(); +INSERT INTO test_now VALUES + (now(), 1), (now() + INTERVAL '1 hour', 1), + (now() + INTERVAL '2 hour', 2 ), (now() + INTERVAL '3 hour', 3); +SELECT count(*) FILTER (WHERE time = now()), count(*) FILTER (WHERE time < now()), + count(*) FILTER (WHERE time > now()) FROM test_now; + count | count | count +-------+-------+------- + 1 | 0 | 3 +(1 row) + +EXECUTE test_query; + count | count | count +-------+-------+------- + 1 | 0 | 3 +(1 row) + +-- Also test different code paths used with aggregation pushdown. +-- We can't run EXPLAIN here, because now() is different every time. But the +-- strict equality should be enough to detect if now() is being erroneously +-- evaluated on data node, where it will differ from time to time. +SELECT count(*) FROM test_now WHERE time = now(); + count +------- + 1 +(1 row) + +COMMIT; +-- now() will be different in a new transaction. 
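+-- [Editorial sketch, not part of the generated test output] The behaviour relied on above:
+-- now() is STABLE and pinned to the transaction start time, so it stays constant for the
+-- whole transaction (and is re-evaluated on each execution of a prepared statement), while
+-- clock_timestamp() keeps advancing.
+BEGIN;
+SELECT now() = transaction_timestamp() AS now_is_txn_start;
+SELECT pg_sleep(0.1);
+SELECT now() = transaction_timestamp() AS still_txn_start,
+       clock_timestamp() > now() AS clock_has_advanced;
+COMMIT;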
+BEGIN; +SELECT count(*) FILTER (WHERE time = now()), count(*) FILTER (WHERE time < now()), + count(*) FILTER (WHERE time > now()) FROM test_now; + count | count | count +-------+-------+------- + 0 | 1 | 3 +(1 row) + +EXECUTE test_query; + count | count | count +-------+-------+------- + 0 | 1 | 3 +(1 row) + +SELECT count(*) FROM test_now WHERE time = now(); + count +------- + 0 +(1 row) + +COMMIT; +DROP TABLE test_now; +DEALLOCATE test_query; +-- Check enabling distributed compression within a +-- procedure/function works +-- +-- #3705 +-- +CREATE TABLE test (time timestamp, v int); +SELECT create_distributed_hypertable('test','time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (33,public,test,t) +(1 row) + +CREATE PROCEDURE test_set_compression() LANGUAGE PLPGSQL AS $$ +BEGIN + ALTER TABLE test SET (timescaledb.compress); +END +$$; +CALL test_set_compression(); +INSERT INTO test VALUES (now(), 0); +SELECT compress_chunk(show_chunks) FROM show_chunks('test'); + compress_chunk +------------------------------------------------ + _timescaledb_internal._dist_hyper_33_103_chunk +(1 row) + +DROP TABLE test; +CREATE TABLE test (time timestamp, v int); +SELECT create_distributed_hypertable('test','time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------- + (34,public,test,t) +(1 row) + +CREATE FUNCTION test_set_compression_func() RETURNS BOOL LANGUAGE PLPGSQL AS $$ +BEGIN + ALTER TABLE test SET (timescaledb.compress); + RETURN TRUE; +END +$$; +SELECT test_set_compression_func(); + test_set_compression_func +--------------------------- + t +(1 row) + +INSERT INTO test VALUES (now(), 0); +SELECT compress_chunk(show_chunks) FROM show_chunks('test'); + compress_chunk +------------------------------------------------ + _timescaledb_internal._dist_hyper_34_104_chunk +(1 row) + +DROP TABLE test; +-- Fix ALTER SET/DROP NULL constraint on distributed hypertable +-- +-- #3860 +-- +CREATE TABLE test (time timestamp NOT NULL, my_column int NOT NULL); +SELECT create_distributed_hypertable('test','time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_distributed_hypertable +------------------------------- + (35,public,test,t) +(1 row) + +\set ON_ERROR_STOP 0 +INSERT INTO test VALUES (now(), NULL); +ERROR: [db_dist_hypertable_3]: null value in column "my_column" of relation "_dist_hyper_35_105_chunk" violates not-null constraint +\set ON_ERROR_STOP 1 +ALTER TABLE test ALTER COLUMN my_column DROP NOT NULL; +INSERT INTO test VALUES (now(), NULL); +\set ON_ERROR_STOP 0 +ALTER TABLE test ALTER COLUMN my_column SET NOT NULL; +ERROR: [db_dist_hypertable_3]: column "my_column" of relation "_dist_hyper_35_106_chunk" contains null values +\set ON_ERROR_STOP 1 +DELETE FROM test; +ALTER TABLE test ALTER COLUMN my_column SET NOT NULL; +DROP TABLE test; +-- Test insert into distributed hypertable with pruned chunks +CREATE TABLE pruned_chunks_1(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); +SELECT table_name FROM create_distributed_hypertable('pruned_chunks_1', 'time', 'sensor_id'); + table_name +----------------- + pruned_chunks_1 +(1 row) + +INSERT INTO pruned_chunks_1 VALUES ('2020-12-09',1,32.2); +CREATE 
TABLE pruned_chunks_2(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); +-- Convert the table to a distributed hypertable +SELECT table_name FROM create_distributed_hypertable('pruned_chunks_2', 'time', 'sensor_id'); + table_name +----------------- + pruned_chunks_2 +(1 row) + +insert into pruned_chunks_2 select * from pruned_chunks_1; +insert into pruned_chunks_2 select * from pruned_chunks_1 WHERE time > '2022-01-01'; +-- TEST freeze_chunk api. does not work for distributed chunks +SELECT chunk_schema || '.' || chunk_name as "CHNAME" +FROM timescaledb_information.chunks +WHERE hypertable_name = 'pruned_chunks_1' +ORDER BY chunk_name LIMIT 1 +\gset +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.freeze_chunk( :'CHNAME'); +ERROR: operation not supported on distributed chunk or foreign table "_dist_hyper_36_107_chunk" +\set ON_ERROR_STOP 1 +--TEST freeze_chunk api for regular hypertables. Works only for >= PG14 +CREATE TABLE freeze_1(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); +SELECT table_name FROM create_hypertable('freeze_1', 'time'); + table_name +------------ + freeze_1 +(1 row) + +INSERT INTO freeze_1 VALUES ('2020-12-09',1,32.2); +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.freeze_chunk( ch) FROM ( select show_chunks('freeze_1') ch ) q; + freeze_chunk +-------------- + t +(1 row) + +\set ON_ERROR_STOP 1 +DROP TABLE pruned_chunks_1; +DROP TABLE pruned_chunks_2; +-- Cleanup +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/dist_partial_agg-16.out b/tsl/test/expected/dist_partial_agg-16.out new file mode 100644 index 00000000000..39449b18398 --- /dev/null +++ b/tsl/test/expected/dist_partial_agg-16.out @@ -0,0 +1,631 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Need to be super user to create extension and add data nodes +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SET ROLE :ROLE_1; +\set TEST_TABLE 'conditions' +\ir 'include/aggregate_table_create.sql' +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- This file creates a table with a lot of different types to allow a range of aggregate functions. +-- This does not include the creation of a corresponding hypertable, as we may want to vary how that is done. 
+CREATE TYPE custom_type AS (high int, low int); +CREATE TABLE :TEST_TABLE ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + region TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null, + highlow custom_type null, + bit_int smallint, + good_life boolean + ); +SET ROLE :ROLE_CLUSTER_SUPERUSER; +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +-- Add data nodes using the TimescaleDB node management API +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------------+-----------------------+--------------+------------------+------------------- + db_dist_partial_agg_1 | db_dist_partial_agg_1 | t | t | t + db_dist_partial_agg_2 | db_dist_partial_agg_2 | t | t | t + db_dist_partial_agg_3 | db_dist_partial_agg_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO :ROLE_1; +SELECT * FROM test.remote_exec('{ db_dist_partial_agg_1, db_dist_partial_agg_2, db_dist_partial_agg_3}', +$$ + CREATE TYPE custom_type AS (high int, low int); +$$); +NOTICE: [db_dist_partial_agg_1]: + CREATE TYPE custom_type AS (high int, low int) +NOTICE: [db_dist_partial_agg_2]: + CREATE TYPE custom_type AS (high int, low int) +NOTICE: [db_dist_partial_agg_3]: + CREATE TYPE custom_type AS (high int, low int) + remote_exec +------------- + +(1 row) + +GRANT CREATE ON SCHEMA public TO :ROLE_1; +-- make sure parallel query plans are preferred on data nodes +ALTER DATABASE :DATA_NODE_1 SET parallel_setup_cost TO 1; +ALTER DATABASE :DATA_NODE_2 SET parallel_setup_cost TO 1; +ALTER DATABASE :DATA_NODE_3 SET parallel_setup_cost TO 1; +-- make sure partitionwise aggregation is enabled on data nodes +ALTER DATABASE :DATA_NODE_1 SET enable_partitionwise_aggregate TO true; +ALTER DATABASE :DATA_NODE_2 SET enable_partitionwise_aggregate TO true; +ALTER DATABASE :DATA_NODE_3 SET enable_partitionwise_aggregate TO true; +SET ROLE :ROLE_1; +SELECT table_name FROM create_distributed_hypertable( 'conditions', 'timec', 'location', 3, chunk_time_interval => INTERVAL '1 day'); + table_name +------------ + conditions +(1 row) + +-- We need a lot of data and a lot of chunks to make the planner push down all of the aggregates +\ir 'include/aggregate_table_populate.sql' +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- This files assumes the existence of some table with definition as seen in the aggregate_table.sql file. 
+INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'POR', 'west', generate_series(25, 85, 0.0625), 75, 40, 70, NULL, (1,2)::custom_type, 2, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'SFO', 'west', generate_series(25, 85, 0.0625), 75, 40, 70, NULL, (1,2)::custom_type, 2, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'SAC', 'west', generate_series(25, 85, 0.0625), 75, 40, 70, NULL, (1,2)::custom_type, 2, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'SEA', 'west', generate_series(25, 85, 0.0625), 75, 40, 70, NULL, (1,2)::custom_type, 2, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'TAC', 'west', generate_series(25, 85, 0.0625), 75, 40, 70, NULL, (1,2)::custom_type, 2, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'NYC', 'north-east', generate_series(29, 41, 0.0125), 45, 50, 40, NULL, (3,4)::custom_type, 4, false; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'BOS', 'north-east', generate_series(29, 41, 0.0125), 45, 50, 40, NULL, (3,4)::custom_type, 4, false; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'CHI', 'midwest', generate_series(29, 41, 0.0125), 45, 50, 40, NULL, (3,4)::custom_type, 4, false; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'MIN', 'midwest', generate_series(29, 41, 0.0125), 45, 50, 40, NULL, (3,4)::custom_type, 4, false; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'DET', 'midwest', generate_series(29, 41, 0.0125), 45, 50, 40, NULL, (3,4)::custom_type, 4, false; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'LA', 'west', generate_series(61, 85, 0.025), 55, NULL, 28, NULL, NULL, 8, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'SDG', 'west', generate_series(61, 85, 0.025), 55, NULL, 28, NULL, NULL, 8, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'PHX', 'west', generate_series(61, 85, 0.025), 55, NULL, 28, NULL, NULL, 8, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'DAL', 'south', generate_series(61, 85, 0.025), 55, NULL, 28, NULL, NULL, 8, true; +INSERT INTO :TEST_TABLE +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-04 08:00'::timestamp, '5 minute'), 'AUS', 'south', generate_series(61, 85, 0.025), 55, NULL, 28, NULL, NULL, 8, true; +SET enable_partitionwise_aggregate = ON; +SET timescaledb.remote_data_fetcher = 'cursor'; +-- Run an explain on the aggregate queries to make sure expected aggregates are being pushed down. 
+-- Grouping by the partitioning column should result in full aggregate pushdown where possible, +-- while using a non-partitioning column should result in a partial pushdown +\set PREFIX 'EXPLAIN (VERBOSE, COSTS OFF)' +\set GROUPING 'location' +\ir 'include/aggregate_queries.sql' +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- This file assumes the existence of some table with definition as seen in the aggregate_table.sql file. +-- All of these should be able to be pushed down if enabled +:PREFIX SELECT :GROUPING, + min(allnull) as min_allnull, + max(temperature) as max_temp, + sum(temperature)+sum(humidity) as agg_sum_expr, + avg(humidity), + ROUND(stddev(CAST(humidity AS INT)), 5), + bit_and(bit_int), + bit_or(bit_int), + bool_and(good_life), + every(temperature > 0), + bool_or(good_life), + count(*) as count_rows, + count(temperature) as count_temp, + count(allnull) as count_zero, + ROUND(CAST(corr(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(covar_pop(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(covar_samp(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_avgx(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_avgy(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_count(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_intercept(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_r2(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_slope(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_sxx(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_sxy(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_syy(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(stddev(CAST(temperature AS INT)), 5) as stddev_temp, + ROUND(stddev_pop(CAST(temperature AS INT)), 5), + ROUND(stddev_samp(CAST(temperature AS INT)), 5), + ROUND(variance(CAST(temperature AS INT)), 5), + ROUND(var_pop(CAST(temperature AS INT)), 5), + ROUND(var_samp(CAST(temperature AS INT)), 5), + last(temperature, timec) as last_temp, + histogram(temperature, 0, 100, 1) + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: location, (min(allnull)), (max(temperature)), ((sum(temperature) + sum(humidity))), (avg(humidity)), (round(stddev((humidity)::integer), 5)), (bit_and(bit_int)), (bit_or(bit_int)), (bool_and(good_life)), (every((temperature > '0'::double precision))), (bool_or(good_life)), (count(*)), (count(temperature)), (count(allnull)), (round((corr(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((covar_pop(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((covar_samp(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), 
(round((regr_avgx(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_avgy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_count(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_intercept(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_r2(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_slope(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxx(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round((regr_syy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5)), (round(stddev((temperature)::integer), 5)), (round(stddev_pop((temperature)::integer), 5)), (round(stddev_samp((temperature)::integer), 5)), (round(variance((temperature)::integer), 5)), (round(var_pop((temperature)::integer), 5)), (round(var_samp((temperature)::integer), 5)), (last(temperature, timec)), (histogram(temperature, '0'::double precision, '100'::double precision, 1)), timec + -> Merge Append + Sort Key: conditions.location, conditions.timec + -> Custom Scan (DataNodeScan) + Output: conditions.location, (min(conditions.allnull)), (max(conditions.temperature)), ((sum(conditions.temperature) + sum(conditions.humidity))), (avg(conditions.humidity)), (round(stddev((conditions.humidity)::integer), 5)), (bit_and(conditions.bit_int)), (bit_or(conditions.bit_int)), (bool_and(conditions.good_life)), (every((conditions.temperature > '0'::double precision))), (bool_or(conditions.good_life)), (count(*)), (count(conditions.temperature)), (count(conditions.allnull)), (round((corr(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((covar_pop(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((covar_samp(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_avgx(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_avgy(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_count(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_intercept(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_r2(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_slope(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxx(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxy(((conditions.temperature)::integer)::double precision, 
((conditions.humidity)::integer)::double precision))::numeric, 5)), (round((regr_syy(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision))::numeric, 5)), (round(stddev((conditions.temperature)::integer), 5)), (round(stddev_pop((conditions.temperature)::integer), 5)), (round(stddev_samp((conditions.temperature)::integer), 5)), (round(variance((conditions.temperature)::integer), 5)), (round(var_pop((conditions.temperature)::integer), 5)), (round(var_samp((conditions.temperature)::integer), 5)), (last(conditions.temperature, conditions.timec)), (histogram(conditions.temperature, '0'::double precision, '100'::double precision, 1)), conditions.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT location, min(allnull), max(temperature), (sum(temperature) + sum(humidity)), avg(humidity), round(stddev(humidity::integer), 5), bit_and(bit_int), bit_or(bit_int), bool_and(good_life), every((temperature > 0::double precision)), bool_or(good_life), count(*), count(temperature), count(allnull), round(corr(temperature::integer, humidity::integer)::numeric, 5), round(covar_pop(temperature::integer, humidity::integer)::numeric, 5), round(covar_samp(temperature::integer, humidity::integer)::numeric, 5), round(regr_avgx(temperature::integer, humidity::integer)::numeric, 5), round(regr_avgy(temperature::integer, humidity::integer)::numeric, 5), round(regr_count(temperature::integer, humidity::integer)::numeric, 5), round(regr_intercept(temperature::integer, humidity::integer)::numeric, 5), round(regr_r2(temperature::integer, humidity::integer)::numeric, 5), round(regr_slope(temperature::integer, humidity::integer)::numeric, 5), round(regr_sxx(temperature::integer, humidity::integer)::numeric, 5), round(regr_sxy(temperature::integer, humidity::integer)::numeric, 5), round(regr_syy(temperature::integer, humidity::integer)::numeric, 5), round(stddev(temperature::integer), 5), round(stddev_pop(temperature::integer), 5), round(stddev_samp(temperature::integer), 5), round(variance(temperature::integer), 5), round(var_pop(temperature::integer), 5), round(var_samp(temperature::integer), 5), public.last(temperature, timec), public.histogram(temperature, 0::double precision, 100::double precision, 1), timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 35 ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_1.location, (min(conditions_1.allnull)), (max(conditions_1.temperature)), ((sum(conditions_1.temperature) + sum(conditions_1.humidity))), (avg(conditions_1.humidity)), (round(stddev((conditions_1.humidity)::integer), 5)), (bit_and(conditions_1.bit_int)), (bit_or(conditions_1.bit_int)), (bool_and(conditions_1.good_life)), (every((conditions_1.temperature > '0'::double precision))), (bool_or(conditions_1.good_life)), (count(*)), (count(conditions_1.temperature)), (count(conditions_1.allnull)), (round((corr(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((covar_pop(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((covar_samp(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double 
precision))::numeric, 5)), (round((regr_avgx(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_avgy(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_count(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_intercept(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_r2(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_slope(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxx(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxy(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round((regr_syy(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision))::numeric, 5)), (round(stddev((conditions_1.temperature)::integer), 5)), (round(stddev_pop((conditions_1.temperature)::integer), 5)), (round(stddev_samp((conditions_1.temperature)::integer), 5)), (round(variance((conditions_1.temperature)::integer), 5)), (round(var_pop((conditions_1.temperature)::integer), 5)), (round(var_samp((conditions_1.temperature)::integer), 5)), (last(conditions_1.temperature, conditions_1.timec)), (histogram(conditions_1.temperature, '0'::double precision, '100'::double precision, 1)), conditions_1.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT location, min(allnull), max(temperature), (sum(temperature) + sum(humidity)), avg(humidity), round(stddev(humidity::integer), 5), bit_and(bit_int), bit_or(bit_int), bool_and(good_life), every((temperature > 0::double precision)), bool_or(good_life), count(*), count(temperature), count(allnull), round(corr(temperature::integer, humidity::integer)::numeric, 5), round(covar_pop(temperature::integer, humidity::integer)::numeric, 5), round(covar_samp(temperature::integer, humidity::integer)::numeric, 5), round(regr_avgx(temperature::integer, humidity::integer)::numeric, 5), round(regr_avgy(temperature::integer, humidity::integer)::numeric, 5), round(regr_count(temperature::integer, humidity::integer)::numeric, 5), round(regr_intercept(temperature::integer, humidity::integer)::numeric, 5), round(regr_r2(temperature::integer, humidity::integer)::numeric, 5), round(regr_slope(temperature::integer, humidity::integer)::numeric, 5), round(regr_sxx(temperature::integer, humidity::integer)::numeric, 5), round(regr_sxy(temperature::integer, humidity::integer)::numeric, 5), round(regr_syy(temperature::integer, humidity::integer)::numeric, 5), round(stddev(temperature::integer), 5), round(stddev_pop(temperature::integer), 5), round(stddev_samp(temperature::integer), 5), round(variance(temperature::integer), 5), round(var_pop(temperature::integer), 5), round(var_samp(temperature::integer), 5), public.last(temperature, timec), public.histogram(temperature, 0::double precision, 100::double 
precision, 1), timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 35 ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_2.location, (min(conditions_2.allnull)), (max(conditions_2.temperature)), ((sum(conditions_2.temperature) + sum(conditions_2.humidity))), (avg(conditions_2.humidity)), (round(stddev((conditions_2.humidity)::integer), 5)), (bit_and(conditions_2.bit_int)), (bit_or(conditions_2.bit_int)), (bool_and(conditions_2.good_life)), (every((conditions_2.temperature > '0'::double precision))), (bool_or(conditions_2.good_life)), (count(*)), (count(conditions_2.temperature)), (count(conditions_2.allnull)), (round((corr(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((covar_pop(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((covar_samp(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_avgx(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_avgy(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_count(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_intercept(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_r2(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_slope(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxx(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_sxy(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round((regr_syy(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision))::numeric, 5)), (round(stddev((conditions_2.temperature)::integer), 5)), (round(stddev_pop((conditions_2.temperature)::integer), 5)), (round(stddev_samp((conditions_2.temperature)::integer), 5)), (round(variance((conditions_2.temperature)::integer), 5)), (round(var_pop((conditions_2.temperature)::integer), 5)), (round(var_samp((conditions_2.temperature)::integer), 5)), (last(conditions_2.temperature, conditions_2.timec)), (histogram(conditions_2.temperature, '0'::double precision, '100'::double precision, 1)), conditions_2.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT location, min(allnull), max(temperature), (sum(temperature) + sum(humidity)), avg(humidity), round(stddev(humidity::integer), 5), bit_and(bit_int), bit_or(bit_int), bool_and(good_life), every((temperature > 0::double precision)), bool_or(good_life), count(*), count(temperature), count(allnull), round(corr(temperature::integer, 
humidity::integer)::numeric, 5), round(covar_pop(temperature::integer, humidity::integer)::numeric, 5), round(covar_samp(temperature::integer, humidity::integer)::numeric, 5), round(regr_avgx(temperature::integer, humidity::integer)::numeric, 5), round(regr_avgy(temperature::integer, humidity::integer)::numeric, 5), round(regr_count(temperature::integer, humidity::integer)::numeric, 5), round(regr_intercept(temperature::integer, humidity::integer)::numeric, 5), round(regr_r2(temperature::integer, humidity::integer)::numeric, 5), round(regr_slope(temperature::integer, humidity::integer)::numeric, 5), round(regr_sxx(temperature::integer, humidity::integer)::numeric, 5), round(regr_sxy(temperature::integer, humidity::integer)::numeric, 5), round(regr_syy(temperature::integer, humidity::integer)::numeric, 5), round(stddev(temperature::integer), 5), round(stddev_pop(temperature::integer), 5), round(stddev_samp(temperature::integer), 5), round(variance(temperature::integer), 5), round(var_pop(temperature::integer), 5), round(var_samp(temperature::integer), 5), public.last(temperature, timec), public.histogram(temperature, 0::double precision, 100::double precision, 1), timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 35 ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST +(22 rows) + +-- Aggregates on custom types are not yet pushed down +:PREFIX SELECT :GROUPING, + last(highlow, timec) as last_hl, + first(highlow, timec) as first_hl + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Merge Append + Sort Key: conditions.location, conditions.timec + -> GroupAggregate + Output: conditions.location, last(conditions.highlow, conditions.timec), first(conditions.highlow, conditions.timec), conditions.timec + Group Key: conditions.location, conditions.timec + -> Result + Output: conditions.location, conditions.timec, conditions.highlow + -> Custom Scan (DataNodeScan) on public.conditions + Output: conditions.location, conditions.highlow, conditions.timec + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT timec, location, highlow FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> GroupAggregate + Output: conditions_1.location, last(conditions_1.highlow, conditions_1.timec), first(conditions_1.highlow, conditions_1.timec), conditions_1.timec + Group Key: conditions_1.location, conditions_1.timec + -> Result + Output: conditions_1.location, conditions_1.timec, conditions_1.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_1 + Output: conditions_1.location, conditions_1.highlow, conditions_1.timec + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT timec, location, highlow FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> GroupAggregate + Output: conditions_2.location, last(conditions_2.highlow, conditions_2.timec), 
first(conditions_2.highlow, conditions_2.timec), conditions_2.timec + Group Key: conditions_2.location, conditions_2.timec + -> Result + Output: conditions_2.location, conditions_2.timec, conditions_2.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_2 + Output: conditions_2.location, conditions_2.highlow, conditions_2.timec + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT timec, location, highlow FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST +(32 rows) + +-- Mix of aggregates that push down and those that don't +:PREFIX SELECT :GROUPING, + min(allnull) as min_allnull, + max(temperature) as max_temp, + sum(temperature)+sum(humidity) as agg_sum_expr, + avg(humidity), + ROUND(stddev(CAST(humidity AS INT)), 5), + bit_and(bit_int), + bit_or(bit_int), + bool_and(good_life), + every(temperature > 0), + bool_or(good_life), + first(highlow, timec) as first_hl + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append + Sort Key: conditions.location, conditions.timec + -> GroupAggregate + Output: conditions.location, min(conditions.allnull), max(conditions.temperature), (sum(conditions.temperature) + sum(conditions.humidity)), avg(conditions.humidity), round(stddev((conditions.humidity)::integer), 5), bit_and(conditions.bit_int), bit_or(conditions.bit_int), bool_and(conditions.good_life), every((conditions.temperature > '0'::double precision)), bool_or(conditions.good_life), first(conditions.highlow, conditions.timec), conditions.timec + Group Key: conditions.location, conditions.timec + -> Result + Output: conditions.location, conditions.timec, conditions.allnull, conditions.temperature, conditions.humidity, conditions.bit_int, conditions.good_life, conditions.highlow + -> Custom Scan (DataNodeScan) on public.conditions + Output: conditions.location, conditions.allnull, conditions.temperature, conditions.humidity, conditions.bit_int, conditions.good_life, conditions.highlow, conditions.timec + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT timec, location, temperature, humidity, allnull, highlow, bit_int, good_life FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> GroupAggregate + Output: conditions_1.location, min(conditions_1.allnull), max(conditions_1.temperature), (sum(conditions_1.temperature) + sum(conditions_1.humidity)), avg(conditions_1.humidity), round(stddev((conditions_1.humidity)::integer), 5), bit_and(conditions_1.bit_int), bit_or(conditions_1.bit_int), bool_and(conditions_1.good_life), every((conditions_1.temperature > '0'::double precision)), bool_or(conditions_1.good_life), 
first(conditions_1.highlow, conditions_1.timec), conditions_1.timec + Group Key: conditions_1.location, conditions_1.timec + -> Result + Output: conditions_1.location, conditions_1.timec, conditions_1.allnull, conditions_1.temperature, conditions_1.humidity, conditions_1.bit_int, conditions_1.good_life, conditions_1.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_1 + Output: conditions_1.location, conditions_1.allnull, conditions_1.temperature, conditions_1.humidity, conditions_1.bit_int, conditions_1.good_life, conditions_1.highlow, conditions_1.timec + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT timec, location, temperature, humidity, allnull, highlow, bit_int, good_life FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> GroupAggregate + Output: conditions_2.location, min(conditions_2.allnull), max(conditions_2.temperature), (sum(conditions_2.temperature) + sum(conditions_2.humidity)), avg(conditions_2.humidity), round(stddev((conditions_2.humidity)::integer), 5), bit_and(conditions_2.bit_int), bit_or(conditions_2.bit_int), bool_and(conditions_2.good_life), every((conditions_2.temperature > '0'::double precision)), bool_or(conditions_2.good_life), first(conditions_2.highlow, conditions_2.timec), conditions_2.timec + Group Key: conditions_2.location, conditions_2.timec + -> Result + Output: conditions_2.location, conditions_2.timec, conditions_2.allnull, conditions_2.temperature, conditions_2.humidity, conditions_2.bit_int, conditions_2.good_life, conditions_2.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_2 + Output: conditions_2.location, conditions_2.allnull, conditions_2.temperature, conditions_2.humidity, conditions_2.bit_int, conditions_2.good_life, conditions_2.highlow, conditions_2.timec + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT timec, location, temperature, humidity, allnull, highlow, bit_int, good_life FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST +(32 rows) + +-- Aggregates nested in expressions and no top-level aggregate #3672 +:PREFIX SELECT :GROUPING, + sum(temperature)+sum(humidity) as agg_sum_expr + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: location, ((sum(temperature) + sum(humidity))), timec + -> Merge Append + Sort Key: conditions.location, conditions.timec + -> Custom Scan (DataNodeScan) + Output: conditions.location, ((sum(conditions.temperature) + sum(conditions.humidity))), conditions.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT location, (sum(temperature) + sum(humidity)), timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) 
GROUP BY 1, 3 ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_1.location, ((sum(conditions_1.temperature) + sum(conditions_1.humidity))), conditions_1.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT location, (sum(temperature) + sum(humidity)), timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 3 ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_2.location, ((sum(conditions_2.temperature) + sum(conditions_2.humidity))), conditions_2.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT location, (sum(temperature) + sum(humidity)), timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 3 ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST +(22 rows) + +-- Aggregates with no aggregate reference in targetlist #3664 +:PREFIX SELECT :GROUPING + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + HAVING avg(temperature) > 20 + ORDER BY :GROUPING, timec; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: location, timec + -> Merge Append + Sort Key: conditions.location, conditions.timec + -> Custom Scan (DataNodeScan) + Output: conditions.location, conditions.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT location, timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2 HAVING ((avg(temperature) > 20::double precision)) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_1.location, conditions_1.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT location, timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2 HAVING ((avg(temperature) > 20::double precision)) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_2.location, conditions_2.timec + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT location, timec FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2 HAVING ((avg(temperature) > 20::double precision)) ORDER BY location ASC NULLS LAST, timec ASC NULLS LAST +(22 rows) + +\set GROUPING 'region, temperature' +\ir 'include/aggregate_queries.sql' +-- This file and its contents are licensed under the 
Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- This file assumes the existence of some table with definition as seen in the aggregate_table.sql file. +-- All of these should be able to be pushed down if enabled +:PREFIX SELECT :GROUPING, + min(allnull) as min_allnull, + max(temperature) as max_temp, + sum(temperature)+sum(humidity) as agg_sum_expr, + avg(humidity), + ROUND(stddev(CAST(humidity AS INT)), 5), + bit_and(bit_int), + bit_or(bit_int), + bool_and(good_life), + every(temperature > 0), + bool_or(good_life), + count(*) as count_rows, + count(temperature) as count_temp, + count(allnull) as count_zero, + ROUND(CAST(corr(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(covar_pop(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(covar_samp(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_avgx(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_avgy(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_count(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_intercept(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_r2(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_slope(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_sxx(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_sxy(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(CAST(regr_syy(CAST(temperature AS INT), CAST(humidity AS INT)) AS NUMERIC), 5), + ROUND(stddev(CAST(temperature AS INT)), 5) as stddev_temp, + ROUND(stddev_pop(CAST(temperature AS INT)), 5), + ROUND(stddev_samp(CAST(temperature AS INT)), 5), + ROUND(variance(CAST(temperature AS INT)), 5), + ROUND(var_pop(CAST(temperature AS INT)), 5), + ROUND(var_samp(CAST(temperature AS INT)), 5), + last(temperature, timec) as last_temp, + histogram(temperature, 0, 100, 1) + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: region, temperature, min(allnull), max(temperature), (sum(temperature) + sum(humidity)), avg(humidity), round(stddev((humidity)::integer), 5), bit_and(bit_int), bit_or(bit_int), bool_and(good_life), every((temperature > '0'::double precision)), bool_or(good_life), count(*), count(temperature), count(allnull), round((corr(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((covar_pop(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((covar_samp(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_avgx(((temperature)::integer)::double precision, 
((humidity)::integer)::double precision))::numeric, 5), round((regr_avgy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_count(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_intercept(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_r2(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_slope(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_sxx(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_sxy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round((regr_syy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision))::numeric, 5), round(stddev((temperature)::integer), 5), round(stddev_pop((temperature)::integer), 5), round(stddev_samp((temperature)::integer), 5), round(variance((temperature)::integer), 5), round(var_pop((temperature)::integer), 5), round(var_samp((temperature)::integer), 5), last(temperature, timec), histogram(temperature, '0'::double precision, '100'::double precision, 1), timec + Group Key: region, temperature, timec + -> Custom Scan (AsyncAppend) + Output: region, temperature, timec, (PARTIAL min(allnull)), (PARTIAL max(temperature)), (PARTIAL sum(temperature)), (PARTIAL sum(humidity)), (PARTIAL avg(humidity)), (PARTIAL stddev((humidity)::integer)), (PARTIAL bit_and(bit_int)), (PARTIAL bit_or(bit_int)), (PARTIAL bool_and(good_life)), (PARTIAL every((temperature > '0'::double precision))), (PARTIAL bool_or(good_life)), (PARTIAL count(*)), (PARTIAL count(temperature)), (PARTIAL count(allnull)), (PARTIAL corr(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL covar_pop(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL covar_samp(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_avgx(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_avgy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_count(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_intercept(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_r2(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_slope(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_sxx(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_sxy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL regr_syy(((temperature)::integer)::double precision, ((humidity)::integer)::double precision)), (PARTIAL stddev((temperature)::integer)), (PARTIAL stddev_pop((temperature)::integer)), (PARTIAL stddev_samp((temperature)::integer)), (PARTIAL variance((temperature)::integer)), (PARTIAL var_pop((temperature)::integer)), (PARTIAL var_samp((temperature)::integer)), (PARTIAL last(temperature, timec)), (PARTIAL histogram(temperature, '0'::double precision, '100'::double precision, 1)) + -> 
Merge Append + Sort Key: conditions.region, conditions.temperature, conditions.timec + -> Custom Scan (DataNodeScan) + Output: conditions.region, conditions.temperature, conditions.timec, (PARTIAL min(conditions.allnull)), (PARTIAL max(conditions.temperature)), (PARTIAL sum(conditions.temperature)), (PARTIAL sum(conditions.humidity)), (PARTIAL avg(conditions.humidity)), (PARTIAL stddev((conditions.humidity)::integer)), (PARTIAL bit_and(conditions.bit_int)), (PARTIAL bit_or(conditions.bit_int)), (PARTIAL bool_and(conditions.good_life)), (PARTIAL every((conditions.temperature > '0'::double precision))), (PARTIAL bool_or(conditions.good_life)), (PARTIAL count(*)), (PARTIAL count(conditions.temperature)), (PARTIAL count(conditions.allnull)), (PARTIAL corr(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL covar_pop(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL covar_samp(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_avgx(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_avgy(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_count(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_intercept(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_r2(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_slope(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_sxx(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_sxy(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL regr_syy(((conditions.temperature)::integer)::double precision, ((conditions.humidity)::integer)::double precision)), (PARTIAL stddev((conditions.temperature)::integer)), (PARTIAL stddev_pop((conditions.temperature)::integer)), (PARTIAL stddev_samp((conditions.temperature)::integer)), (PARTIAL variance((conditions.temperature)::integer)), (PARTIAL var_pop((conditions.temperature)::integer)), (PARTIAL var_samp((conditions.temperature)::integer)), (PARTIAL last(conditions.temperature, conditions.timec)), (PARTIAL histogram(conditions.temperature, '0'::double precision, '100'::double precision, 1)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(min(allnull)), _timescaledb_functions.partialize_agg(max(temperature)), _timescaledb_functions.partialize_agg(sum(temperature)), _timescaledb_functions.partialize_agg(sum(humidity)), _timescaledb_functions.partialize_agg(avg(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity::integer)), _timescaledb_functions.partialize_agg(bit_and(bit_int)), _timescaledb_functions.partialize_agg(bit_or(bit_int)), _timescaledb_functions.partialize_agg(bool_and(good_life)), 
_timescaledb_functions.partialize_agg(every((temperature > 0::double precision))), _timescaledb_functions.partialize_agg(bool_or(good_life)), _timescaledb_functions.partialize_agg(count(*)), _timescaledb_functions.partialize_agg(count(temperature)), _timescaledb_functions.partialize_agg(count(allnull)), _timescaledb_functions.partialize_agg(corr(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(covar_pop(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(covar_samp(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_avgx(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_avgy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_count(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_intercept(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_r2(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_slope(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_sxx(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_sxy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_syy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(stddev(temperature::integer)), _timescaledb_functions.partialize_agg(stddev_pop(temperature::integer)), _timescaledb_functions.partialize_agg(stddev_samp(temperature::integer)), _timescaledb_functions.partialize_agg(variance(temperature::integer)), _timescaledb_functions.partialize_agg(var_pop(temperature::integer)), _timescaledb_functions.partialize_agg(var_samp(temperature::integer)), _timescaledb_functions.partialize_agg(public.last(temperature, timec)), _timescaledb_functions.partialize_agg(public.histogram(temperature, 0::double precision, 100::double precision, 1)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_1.region, conditions_1.temperature, conditions_1.timec, (PARTIAL min(conditions_1.allnull)), (PARTIAL max(conditions_1.temperature)), (PARTIAL sum(conditions_1.temperature)), (PARTIAL sum(conditions_1.humidity)), (PARTIAL avg(conditions_1.humidity)), (PARTIAL stddev((conditions_1.humidity)::integer)), (PARTIAL bit_and(conditions_1.bit_int)), (PARTIAL bit_or(conditions_1.bit_int)), (PARTIAL bool_and(conditions_1.good_life)), (PARTIAL every((conditions_1.temperature > '0'::double precision))), (PARTIAL bool_or(conditions_1.good_life)), (PARTIAL count(*)), (PARTIAL count(conditions_1.temperature)), (PARTIAL count(conditions_1.allnull)), (PARTIAL corr(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL covar_pop(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL covar_samp(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_avgx(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_avgy(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double 
precision)), (PARTIAL regr_count(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_intercept(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_r2(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_slope(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_sxx(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_sxy(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL regr_syy(((conditions_1.temperature)::integer)::double precision, ((conditions_1.humidity)::integer)::double precision)), (PARTIAL stddev((conditions_1.temperature)::integer)), (PARTIAL stddev_pop((conditions_1.temperature)::integer)), (PARTIAL stddev_samp((conditions_1.temperature)::integer)), (PARTIAL variance((conditions_1.temperature)::integer)), (PARTIAL var_pop((conditions_1.temperature)::integer)), (PARTIAL var_samp((conditions_1.temperature)::integer)), (PARTIAL last(conditions_1.temperature, conditions_1.timec)), (PARTIAL histogram(conditions_1.temperature, '0'::double precision, '100'::double precision, 1)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(min(allnull)), _timescaledb_functions.partialize_agg(max(temperature)), _timescaledb_functions.partialize_agg(sum(temperature)), _timescaledb_functions.partialize_agg(sum(humidity)), _timescaledb_functions.partialize_agg(avg(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity::integer)), _timescaledb_functions.partialize_agg(bit_and(bit_int)), _timescaledb_functions.partialize_agg(bit_or(bit_int)), _timescaledb_functions.partialize_agg(bool_and(good_life)), _timescaledb_functions.partialize_agg(every((temperature > 0::double precision))), _timescaledb_functions.partialize_agg(bool_or(good_life)), _timescaledb_functions.partialize_agg(count(*)), _timescaledb_functions.partialize_agg(count(temperature)), _timescaledb_functions.partialize_agg(count(allnull)), _timescaledb_functions.partialize_agg(corr(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(covar_pop(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(covar_samp(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_avgx(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_avgy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_count(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_intercept(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_r2(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_slope(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_sxx(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_sxy(temperature::integer, humidity::integer)), 
_timescaledb_functions.partialize_agg(regr_syy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(stddev(temperature::integer)), _timescaledb_functions.partialize_agg(stddev_pop(temperature::integer)), _timescaledb_functions.partialize_agg(stddev_samp(temperature::integer)), _timescaledb_functions.partialize_agg(variance(temperature::integer)), _timescaledb_functions.partialize_agg(var_pop(temperature::integer)), _timescaledb_functions.partialize_agg(var_samp(temperature::integer)), _timescaledb_functions.partialize_agg(public.last(temperature, timec)), _timescaledb_functions.partialize_agg(public.histogram(temperature, 0::double precision, 100::double precision, 1)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_2.region, conditions_2.temperature, conditions_2.timec, (PARTIAL min(conditions_2.allnull)), (PARTIAL max(conditions_2.temperature)), (PARTIAL sum(conditions_2.temperature)), (PARTIAL sum(conditions_2.humidity)), (PARTIAL avg(conditions_2.humidity)), (PARTIAL stddev((conditions_2.humidity)::integer)), (PARTIAL bit_and(conditions_2.bit_int)), (PARTIAL bit_or(conditions_2.bit_int)), (PARTIAL bool_and(conditions_2.good_life)), (PARTIAL every((conditions_2.temperature > '0'::double precision))), (PARTIAL bool_or(conditions_2.good_life)), (PARTIAL count(*)), (PARTIAL count(conditions_2.temperature)), (PARTIAL count(conditions_2.allnull)), (PARTIAL corr(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL covar_pop(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL covar_samp(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_avgx(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_avgy(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_count(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_intercept(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_r2(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_slope(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_sxx(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_sxy(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL regr_syy(((conditions_2.temperature)::integer)::double precision, ((conditions_2.humidity)::integer)::double precision)), (PARTIAL stddev((conditions_2.temperature)::integer)), (PARTIAL stddev_pop((conditions_2.temperature)::integer)), (PARTIAL stddev_samp((conditions_2.temperature)::integer)), (PARTIAL variance((conditions_2.temperature)::integer)), (PARTIAL var_pop((conditions_2.temperature)::integer)), (PARTIAL var_samp((conditions_2.temperature)::integer)), (PARTIAL 
last(conditions_2.temperature, conditions_2.timec)), (PARTIAL histogram(conditions_2.temperature, '0'::double precision, '100'::double precision, 1)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(min(allnull)), _timescaledb_functions.partialize_agg(max(temperature)), _timescaledb_functions.partialize_agg(sum(temperature)), _timescaledb_functions.partialize_agg(sum(humidity)), _timescaledb_functions.partialize_agg(avg(humidity)), _timescaledb_functions.partialize_agg(stddev(humidity::integer)), _timescaledb_functions.partialize_agg(bit_and(bit_int)), _timescaledb_functions.partialize_agg(bit_or(bit_int)), _timescaledb_functions.partialize_agg(bool_and(good_life)), _timescaledb_functions.partialize_agg(every((temperature > 0::double precision))), _timescaledb_functions.partialize_agg(bool_or(good_life)), _timescaledb_functions.partialize_agg(count(*)), _timescaledb_functions.partialize_agg(count(temperature)), _timescaledb_functions.partialize_agg(count(allnull)), _timescaledb_functions.partialize_agg(corr(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(covar_pop(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(covar_samp(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_avgx(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_avgy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_count(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_intercept(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_r2(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_slope(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_sxx(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_sxy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(regr_syy(temperature::integer, humidity::integer)), _timescaledb_functions.partialize_agg(stddev(temperature::integer)), _timescaledb_functions.partialize_agg(stddev_pop(temperature::integer)), _timescaledb_functions.partialize_agg(stddev_samp(temperature::integer)), _timescaledb_functions.partialize_agg(variance(temperature::integer)), _timescaledb_functions.partialize_agg(var_pop(temperature::integer)), _timescaledb_functions.partialize_agg(var_samp(temperature::integer)), _timescaledb_functions.partialize_agg(public.last(temperature, timec)), _timescaledb_functions.partialize_agg(public.histogram(temperature, 0::double precision, 100::double precision, 1)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST +(25 rows) + +-- Aggregates on custom types are not yet pushed down +:PREFIX SELECT :GROUPING, + last(highlow, timec) as last_hl, + first(highlow, timec) as first_hl + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: conditions.region, conditions.temperature, last(conditions.highlow, conditions.timec), first(conditions.highlow, conditions.timec), conditions.timec + Group Key: conditions.region, conditions.temperature, conditions.timec + -> Merge Append + Sort Key: conditions.region, conditions.temperature, conditions.timec + -> Partial GroupAggregate + Output: conditions.region, conditions.temperature, conditions.timec, PARTIAL last(conditions.highlow, conditions.timec), PARTIAL first(conditions.highlow, conditions.timec) + Group Key: conditions.region, conditions.temperature, conditions.timec + -> Result + Output: conditions.region, conditions.temperature, conditions.timec, conditions.highlow + -> Custom Scan (DataNodeScan) on public.conditions + Output: conditions.region, conditions.temperature, conditions.highlow, conditions.timec + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT timec, region, temperature, highlow FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Partial GroupAggregate + Output: conditions_1.region, conditions_1.temperature, conditions_1.timec, PARTIAL last(conditions_1.highlow, conditions_1.timec), PARTIAL first(conditions_1.highlow, conditions_1.timec) + Group Key: conditions_1.region, conditions_1.temperature, conditions_1.timec + -> Result + Output: conditions_1.region, conditions_1.temperature, conditions_1.timec, conditions_1.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_1 + Output: conditions_1.region, conditions_1.temperature, conditions_1.highlow, conditions_1.timec + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT timec, region, temperature, highlow FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Partial GroupAggregate + Output: conditions_2.region, conditions_2.temperature, conditions_2.timec, PARTIAL last(conditions_2.highlow, conditions_2.timec), PARTIAL first(conditions_2.highlow, conditions_2.timec) + Group Key: conditions_2.region, conditions_2.temperature, conditions_2.timec + -> Result + Output: conditions_2.region, conditions_2.temperature, conditions_2.timec, conditions_2.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_2 + Output: conditions_2.region, conditions_2.temperature, conditions_2.highlow, conditions_2.timec + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT timec, region, temperature, highlow FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST +(35 rows) + +-- Mix of aggregates that push down and those that don't +:PREFIX SELECT :GROUPING, + min(allnull) as min_allnull, + max(temperature) 
as max_temp, + sum(temperature)+sum(humidity) as agg_sum_expr, + avg(humidity), + ROUND(stddev(CAST(humidity AS INT)), 5), + bit_and(bit_int), + bit_or(bit_int), + bool_and(good_life), + every(temperature > 0), + bool_or(good_life), + first(highlow, timec) as first_hl + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: conditions.region, conditions.temperature, min(conditions.allnull), max(conditions.temperature), (sum(conditions.temperature) + sum(conditions.humidity)), avg(conditions.humidity), round(stddev((conditions.humidity)::integer), 5), bit_and(conditions.bit_int), bit_or(conditions.bit_int), bool_and(conditions.good_life), every((conditions.temperature > '0'::double precision)), bool_or(conditions.good_life), first(conditions.highlow, conditions.timec), conditions.timec + Group Key: conditions.region, conditions.temperature, conditions.timec + -> Merge Append + Sort Key: conditions.region, conditions.temperature, conditions.timec + -> Partial GroupAggregate + Output: conditions.region, conditions.temperature, conditions.timec, PARTIAL min(conditions.allnull), PARTIAL max(conditions.temperature), PARTIAL sum(conditions.temperature), PARTIAL sum(conditions.humidity), PARTIAL avg(conditions.humidity), PARTIAL stddev((conditions.humidity)::integer), PARTIAL bit_and(conditions.bit_int), PARTIAL bit_or(conditions.bit_int), PARTIAL bool_and(conditions.good_life), PARTIAL every((conditions.temperature > '0'::double precision)), PARTIAL bool_or(conditions.good_life), PARTIAL first(conditions.highlow, conditions.timec) + Group Key: conditions.region, conditions.temperature, conditions.timec + -> Result + Output: conditions.region, conditions.temperature, conditions.timec, conditions.allnull, conditions.humidity, conditions.bit_int, conditions.good_life, conditions.highlow + -> Custom Scan (DataNodeScan) on public.conditions + Output: conditions.region, conditions.temperature, conditions.allnull, conditions.humidity, conditions.bit_int, conditions.good_life, conditions.highlow, conditions.timec + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT timec, region, temperature, humidity, allnull, highlow, bit_int, good_life FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Partial GroupAggregate + Output: conditions_1.region, conditions_1.temperature, conditions_1.timec, PARTIAL min(conditions_1.allnull), PARTIAL max(conditions_1.temperature), PARTIAL sum(conditions_1.temperature), PARTIAL sum(conditions_1.humidity), PARTIAL avg(conditions_1.humidity), PARTIAL stddev((conditions_1.humidity)::integer), PARTIAL bit_and(conditions_1.bit_int), PARTIAL bit_or(conditions_1.bit_int), PARTIAL 
bool_and(conditions_1.good_life), PARTIAL every((conditions_1.temperature > '0'::double precision)), PARTIAL bool_or(conditions_1.good_life), PARTIAL first(conditions_1.highlow, conditions_1.timec) + Group Key: conditions_1.region, conditions_1.temperature, conditions_1.timec + -> Result + Output: conditions_1.region, conditions_1.temperature, conditions_1.timec, conditions_1.allnull, conditions_1.humidity, conditions_1.bit_int, conditions_1.good_life, conditions_1.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_1 + Output: conditions_1.region, conditions_1.temperature, conditions_1.allnull, conditions_1.humidity, conditions_1.bit_int, conditions_1.good_life, conditions_1.highlow, conditions_1.timec + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT timec, region, temperature, humidity, allnull, highlow, bit_int, good_life FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Partial GroupAggregate + Output: conditions_2.region, conditions_2.temperature, conditions_2.timec, PARTIAL min(conditions_2.allnull), PARTIAL max(conditions_2.temperature), PARTIAL sum(conditions_2.temperature), PARTIAL sum(conditions_2.humidity), PARTIAL avg(conditions_2.humidity), PARTIAL stddev((conditions_2.humidity)::integer), PARTIAL bit_and(conditions_2.bit_int), PARTIAL bit_or(conditions_2.bit_int), PARTIAL bool_and(conditions_2.good_life), PARTIAL every((conditions_2.temperature > '0'::double precision)), PARTIAL bool_or(conditions_2.good_life), PARTIAL first(conditions_2.highlow, conditions_2.timec) + Group Key: conditions_2.region, conditions_2.temperature, conditions_2.timec + -> Result + Output: conditions_2.region, conditions_2.temperature, conditions_2.timec, conditions_2.allnull, conditions_2.humidity, conditions_2.bit_int, conditions_2.good_life, conditions_2.highlow + -> Custom Scan (DataNodeScan) on public.conditions conditions_2 + Output: conditions_2.region, conditions_2.temperature, conditions_2.allnull, conditions_2.humidity, conditions_2.bit_int, conditions_2.good_life, conditions_2.highlow, conditions_2.timec + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT timec, region, temperature, humidity, allnull, highlow, bit_int, good_life FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST +(35 rows) + +-- Aggregates nested in expressions and no top-level aggregate #3672 +:PREFIX SELECT :GROUPING, + sum(temperature)+sum(humidity) as agg_sum_expr + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + ORDER BY :GROUPING, timec; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Finalize GroupAggregate + Output: region, temperature, (sum(temperature) + sum(humidity)), timec + Group Key: region, temperature, timec + -> Custom Scan (AsyncAppend) + Output: region, temperature, 
timec, (PARTIAL sum(temperature)), (PARTIAL sum(humidity)) + -> Merge Append + Sort Key: conditions.region, conditions.temperature, conditions.timec + -> Custom Scan (DataNodeScan) + Output: conditions.region, conditions.temperature, conditions.timec, (PARTIAL sum(conditions.temperature)), (PARTIAL sum(conditions.humidity)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(sum(temperature)), _timescaledb_functions.partialize_agg(sum(humidity)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_1.region, conditions_1.temperature, conditions_1.timec, (PARTIAL sum(conditions_1.temperature)), (PARTIAL sum(conditions_1.humidity)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(sum(temperature)), _timescaledb_functions.partialize_agg(sum(humidity)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_2.region, conditions_2.temperature, conditions_2.timec, (PARTIAL sum(conditions_2.temperature)), (PARTIAL sum(conditions_2.humidity)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(sum(temperature)), _timescaledb_functions.partialize_agg(sum(humidity)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST +(25 rows) + +-- Aggregates with no aggregate reference in targetlist #3664 +:PREFIX SELECT :GROUPING + FROM :TEST_TABLE + GROUP BY :GROUPING, timec + HAVING avg(temperature) > 20 + ORDER BY :GROUPING, timec; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Finalize GroupAggregate + Output: region, temperature, timec + Group Key: region, temperature, timec + Filter: (avg(temperature) > '20'::double precision) + -> Custom Scan (AsyncAppend) + Output: region, temperature, timec, (PARTIAL avg(temperature)) + -> Merge Append + Sort Key: conditions.region, conditions.temperature, conditions.timec + -> Custom Scan (DataNodeScan) + Output: conditions.region, conditions.temperature, conditions.timec, (PARTIAL avg(conditions.temperature)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk + Remote SQL: 
SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(avg(temperature)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_1.region, conditions_1.temperature, conditions_1.timec, (PARTIAL avg(conditions_1.temperature)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_2 + Chunks: _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(avg(temperature)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: conditions_2.region, conditions_2.temperature, conditions_2.timec, (PARTIAL avg(conditions_2.temperature)) + Relations: Aggregate on (public.conditions) + Data node: db_dist_partial_agg_3 + Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk + Remote SQL: SELECT region, temperature, timec, _timescaledb_functions.partialize_agg(avg(temperature)) FROM public.conditions WHERE _timescaledb_functions.chunks_in(public.conditions.*, ARRAY[1, 2, 3, 4]) GROUP BY 1, 2, 3 ORDER BY region ASC NULLS LAST, temperature ASC NULLS LAST, timec ASC NULLS LAST +(26 rows) + +-- Full aggregate pushdown correctness check, compare location grouped query results with partitionwise aggregates on and off +\set GROUPING 'location' +SELECT format('%s/results/dist_agg_loc_results_test.out', :'TEST_OUTPUT_DIR') as "RESULTS_TEST1", + format('%s/results/dist_agg_loc_results_control.out', :'TEST_OUTPUT_DIR') as "RESULTS_CONTROL1" +\gset +SELECT format('\! diff %s %s', :'RESULTS_CONTROL1', :'RESULTS_TEST1') as "DIFF_CMD1" +\gset +--generate the results into two different files +\set ECHO errors +:DIFF_CMD1 +-- Partial aggregate pushdown correctness check, compare region grouped query results with partitionwise aggregates on and off +\set GROUPING 'region' +SELECT format('%s/results/dist_agg_region_results_test.out', :'TEST_OUTPUT_DIR') as "RESULTS_TEST2", + format('%s/results/dist_agg_region_results_control.out', :'TEST_OUTPUT_DIR') as "RESULTS_CONTROL2" +\gset +SELECT format('\! diff %s %s', :'RESULTS_CONTROL2', :'RESULTS_TEST2') as "DIFF_CMD2" +\gset +--generate the results into two different files +\set ECHO errors +:DIFF_CMD2 +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/dist_query-16.out b/tsl/test/expected/dist_query-16.out new file mode 100644 index 00000000000..64b7f53c050 --- /dev/null +++ b/tsl/test/expected/dist_query-16.out @@ -0,0 +1,6047 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license.
+\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +\set TEST_BASE_NAME dist_query +-- Run +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_NAME", + format('include/%s_run.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME", + format('%s/results/%s_results_reference.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_REFERENCE", + format('%s/results/%s_results_repartitioning_reference.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_REPART_REFERENCE", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_repartitioning_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_REPART_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_UNOPTIMIZED", + format('%s/results/%s_results_1dim.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_1DIM" +\gset +SELECT format('\! diff %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_REFERENCE') AS "DIFF_CMD_UNOPT", + format('\! diff %s %s', :'TEST_RESULTS_OPTIMIZED', :'TEST_RESULTS_REFERENCE') AS "DIFF_CMD_OPT", + format('\! diff %s %s', :'TEST_RESULTS_REPART_OPTIMIZED', :'TEST_RESULTS_REPART_REFERENCE') AS "DIFF_CMD_REPART", + format('\! diff %s %s', :'TEST_RESULTS_1DIM', :'TEST_RESULTS_REPART_REFERENCE') AS "DIFF_CMD_1DIM" +\gset +-- Use a small fetch size to make sure that results are fetched across +-- multiple fetches. +--ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (ADD fetch_size '500'); +SET client_min_messages TO notice; +-- Load the data +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\ir debugsupport.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license.
+CREATE OR REPLACE FUNCTION test.tsl_override_current_timestamptz(new_value TIMESTAMPTZ) +RETURNS VOID AS :TSL_MODULE_PATHNAME, 'ts_test_override_current_timestamptz' LANGUAGE C VOLATILE; +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +-- Add data nodes +SET ROLE :ROLE_CLUSTER_SUPERUSER; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------+-----------------+--------------+------------------+------------------- + db_dist_query_1 | db_dist_query_1 | t | t | t + db_dist_query_2 | db_dist_query_2 | t | t | t + db_dist_query_3 | db_dist_query_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO :ROLE_1; +GRANT CREATE ON SCHEMA public TO :ROLE_1; +SET ROLE :ROLE_1; +-- Create a "normal" PG table as reference, one two-dimensional +-- distributed hypertable, and a one-dimensional distributed +-- hypertable +CREATE TABLE reference (time timestamptz NOT NULL, device int, location int, temp float); +CREATE TABLE hyper (LIKE reference); +CREATE TABLE hyper1d (LIKE reference); +SELECT create_distributed_hypertable('hyper', 'time', 'device', 3, + chunk_time_interval => interval '18 hours'); + create_distributed_hypertable +------------------------------- + (1,public,hyper,t) +(1 row) + +SELECT create_distributed_hypertable('hyper1d', 'time', chunk_time_interval => interval '36 hours'); + create_distributed_hypertable +------------------------------- + (2,public,hyper1d,t) +(1 row) + +SELECT setseed(1); + setseed +--------- + +(1 row) + +INSERT INTO reference +SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, (random() * 20)::int, random() * 80 +FROM generate_series('2019-01-01'::timestamptz, '2019-01-04'::timestamptz, '1 minute') as t; +-- Insert the same data into the hypertable but repartition the data +-- set so that we can test the "safeness" of some push-downs across +-- the repartitioning boundary. 
+INSERT INTO hyper +SELECT * FROM reference +WHERE time < '2019-01-02 05:10'::timestamptz +ORDER BY time; +SELECT * FROM set_number_partitions('hyper', 2); +psql:include/dist_query_load.sql:47: WARNING: insufficient number of partitions for dimension "device" + set_number_partitions +----------------------- + +(1 row) + +INSERT INTO hyper +SELECT * FROM reference +WHERE time >= '2019-01-02 05:10'::timestamptz +AND time < '2019-01-03 01:22'::timestamptz +ORDER BY time; +SELECT * FROM set_number_partitions('hyper', 5); + set_number_partitions +----------------------- + +(1 row) + +INSERT INTO hyper +SELECT * FROM reference +WHERE time >= '2019-01-03 01:22'::timestamptz +ORDER BY time; +INSERT INTO hyper1d +SELECT * FROM reference ORDER BY time; +SELECT d.hypertable_id, d.id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.dimension d, _timescaledb_catalog.dimension_slice ds +WHERE num_slices IS NOT NULL +AND d.id = ds.dimension_id +ORDER BY 1, 2, 3, 4; + hypertable_id | id | range_start | range_end +---------------+----+----------------------+--------------------- + 1 | 2 | -9223372036854775808 | 429496729 + 1 | 2 | -9223372036854775808 | 715827882 + 1 | 2 | -9223372036854775808 | 1073741823 + 1 | 2 | 429496729 | 858993458 + 1 | 2 | 715827882 | 1431655764 + 1 | 2 | 858993458 | 1288490187 + 1 | 2 | 1073741823 | 9223372036854775807 + 1 | 2 | 1288490187 | 1717986916 + 1 | 2 | 1431655764 | 9223372036854775807 + 1 | 2 | 1717986916 | 9223372036854775807 +(10 rows) + +-- Set the max time we can query without hitting the repartitioned +-- chunks. Note that this is before the given repartitioning time +-- above because chunk boundaries do not align exactly with the given +-- timestamp +\set REPARTITIONED_TIME_RANGE 'time >= ''2019-01-01''' +\set CLEAN_PARTITIONING_TIME_RANGE 'time BETWEEN ''2019-01-01'' AND ''2019-01-01 15:00''' +-- Custom agg func for push down tests +CREATE AGGREGATE custom_sum(int4) ( + SFUNC = int4_sum, + STYPE = int8 +); +-- Set seed on all data nodes for ANALYZE to sample consistently +CALL distributed_exec($$ SELECT setseed(1); $$); +ANALYZE reference; +ANALYZE hyper; +ANALYZE hyper1d; +SELECT hypertable_schema, hypertable_name, num_dimensions, num_chunks +FROM timescaledb_information.hypertables +ORDER BY 1,2; + hypertable_schema | hypertable_name | num_dimensions | num_chunks +-------------------+-----------------+----------------+------------ + public | hyper | 2 | 18 + public | hyper1d | 1 | 3 +(2 rows) + +SELECT count(*) FROM hyper; + count +------- + 4321 +(1 row) + +SELECT count(*) FROM hyper WHERE :CLEAN_PARTITIONING_TIME_RANGE; + count +------- + 901 +(1 row) + +SET enable_partitionwise_aggregate = ON; +\set ECHO errors + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: EXPLAIN (verbose, costs off) +%%% WHERE_CLAUSE: :CLEAN_PARTITIONING_TIME_RANGE +%%% ORDER_BY_1: +%%% ORDER_BY_1_2: +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + setseed +--------- + +(1 row) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 + + + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: "time", avg(temp) + Group Key: "time" + -> Sort + Output: "time", (PARTIAL avg(temp)) + Sort Key: "time" + -> Custom Scan (AsyncAppend) + Output: "time", (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper."time", (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 +(27 rows) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 + + + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")) + -> Sort + Output: (time_bucket('@ 2 days'::interval, "time")), (PARTIAL avg(temp)) + Sort Key: (time_bucket('@ 2 days'::interval, "time")) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 
00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 +(27 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 + + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' 
+GROUP BY 1,2 + + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT date_trunc('month', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 + + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: (date_trunc('month'::text, "time")), device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: 
db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING device > 4 + + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 +(21 rows) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP 
BY 1,2 +HAVING avg(temp) > 40 AND max(temp) < 70 + + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) +(21 rows) + + +######### Grouping on device only (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 + + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 + 
-> Custom Scan (DataNodeScan) + Output: hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 +(21 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT location, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND (temp * random() >= 0) +GROUP BY 1 + + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper.location, avg(hyper.temp) + Group Key: hyper.location + -> Custom Scan (AsyncAppend) + Output: hyper.location, hyper.temp + -> Merge Append + Sort Key: hyper_1.location + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.location, hyper_1.temp + Filter: ((hyper_1.temp * random()) >= '0'::double precision) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.location, hyper_2.temp + Filter: ((hyper_2.temp * random()) >= '0'::double precision) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.location, hyper_3.temp + Filter: ((hyper_3.temp * random()) >= '0'::double precision) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST +(25 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp), sum(temp * (random() <= 1)::int) as sum +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 + + + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append + -> GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, avg(hyper.temp), sum((hyper.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper."time"), hyper.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper."time"), hyper.device, hyper.temp + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, avg(hyper_1.temp), sum((hyper_1.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper_1."time"), hyper_1.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_1."time"), hyper_1.device, hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, avg(hyper_2.temp), sum((hyper_2.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper_2."time"), hyper_2.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_2."time"), hyper_2.device, hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(31 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING avg(temp) * custom_sum(device) > 0.8 + + + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp) + Group Key: hyper."time", hyper.device + Filter: ((avg(hyper.temp) * (custom_sum(hyper.device))::double precision) > '0.8'::double precision) + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper_1."time", hyper_1.device, avg(hyper_1.temp) + Group Key: hyper_1."time", hyper_1.device + Filter: ((avg(hyper_1.temp) * (custom_sum(hyper_1.device))::double precision) > '0.8'::double precision) + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper_2."time", hyper_2.device, avg(hyper_2.temp) + Group Key: hyper_2."time", hyper_2.device + Filter: ((avg(hyper_2.temp) * (custom_sum(hyper_2.device))::double precision) > '0.8'::double precision) + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(30 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp), custom_sum(device) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 + + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)), (custom_sum(device)) + -> Append + -> GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp), custom_sum(hyper.device) + Group Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", 
device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper_1."time", hyper_1.device, avg(hyper_1.temp), custom_sum(hyper_1.device) + Group Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper_2."time", hyper_2.device, avg(hyper_2.temp), custom_sum(hyper_2.device) + Group Key: hyper_2."time", hyper_2.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(27 rows) + + +######### Constification and runtime push down of time-related functions + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP 
BY 1, 2 +(21 rows) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper + +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) LIMIT 10 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper + +LIMIT 5 +OFFSET 5 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM 
public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) LIMIT 10 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper + +LIMIT 0 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) LIMIT 1 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper + +LIMIT extract(year from date '2000-01-01') + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) 
LIMIT 2000 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper + +LIMIT greatest(random(), 10.0) + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) OVER (PARTITION BY device) +FROM hyper + +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + -> WindowAgg + Output: hyper."time", hyper.device, avg(hyper.temp) OVER (?) 
+ -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time", hyper.temp + -> Merge Append + Sort Key: hyper_1.device + -> Result + Output: hyper_1.device, hyper_1."time", hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_2.device, hyper_2."time", hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_3.device, hyper_3."time", hyper_3.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST +(29 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT device, time +FROM hyper + +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT 
DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device) device, time +FROM hyper + +LIMIT 10 + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: t."time" + -> Nested Loop + Output: t."time" + Join Filter: (t.device = join_test.device) + -> Custom Scan (AsyncAppend) + Output: t."time", t.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper t_1 + Output: t_1."time", t_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_2 + Output: t_2."time", t_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_3 + Output: t_3."time", t_3.device + Data node: db_dist_query_3 + Chunks: 
_dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) + -> Materialize + Output: join_test.device + -> Seq Scan on public.join_test + Output: join_test.device +(27 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +WITH top_n AS ( + SELECT device, avg(temp) + FROM hyper + WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' + GROUP BY 1 + ORDER BY 2 DESC + LIMIT 10 +) +SELECT time_bucket('60s', time) AS "time", device, avg(temp) +FROM hyper INNER JOIN top_n USING (device) +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, hyper."time")), hyper.device, avg(hyper.temp) + Group Key: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device, hyper.temp + Inner Unique: true + Join Filter: (hyper.device = top_n.device) + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: (time_bucket('@ 1 min'::interval, hyper_1."time")), hyper_1.device + -> Result + Output: hyper_1."time", hyper_1.device, hyper_1.temp, time_bucket('@ 1 min'::interval, hyper_1."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_2."time", hyper_2.device, hyper_2.temp, time_bucket('@ 1 min'::interval, hyper_2."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_3."time", hyper_3.device, hyper_3.temp, time_bucket('@ 1 min'::interval, hyper_3."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY 
public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: top_n.device + -> Subquery Scan on top_n + Output: top_n.device + -> Limit + Output: device, (avg(temp)) + -> Sort + Output: device, (avg(temp)) + Sort Key: (avg(temp)) DESC + -> Custom Scan (AsyncAppend) + Output: device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper_4.device, (avg(hyper_4.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_5.device, (avg(hyper_5.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_6.device, (avg(hyper_6.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(62 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +SELECT time_bucket('60s', h1.time) AS "time", h1.device, avg(h1.temp), max(h2.temp) +FROM hyper h1 INNER JOIN hyper1d h2 ON (time_bucket('60', h1.time) = time_bucket('60', h2.time) AND h1.device = h2.device) +WHERE h1.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND + h2.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, h1."time")), h1.device, avg(h1.temp), max(h2.temp) + Group Key: time_bucket('@ 1 min'::interval, h1."time"), h1.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, h1."time"), h1.device, h1.temp, h2.temp + Join Filter: ((h1.device = h2.device) AND ((time_bucket('@ 1 min'::interval, h2."time")) = time_bucket('@ 1 min'::interval, h1."time"))) + -> Result + Output: h2.temp, h2."time", h2.device, time_bucket('@ 1 min'::interval, h2."time") + -> Custom Scan (DataNodeScan) on public.hyper1d h2 + Output: h2.temp, h2."time", h2.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 
15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: h1."time", h1.device, h1.temp + -> Custom Scan (AsyncAppend) + Output: h1."time", h1.device, h1.temp + -> Append + -> Custom Scan (DataNodeScan) on public.hyper h1_1 + Output: h1_1."time", h1_1.device, h1_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_2 + Output: h1_2."time", h1_2.device, h1_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_3 + Output: h1_3."time", h1_3.device, h1_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(33 rows) + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: EXPLAIN (verbose, costs off) +%%% WHERE_CLAUSE: :CLEAN_PARTITIONING_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + setseed +--------- + +(1 row) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Finalize GroupAggregate + Output: "time", avg(temp) + Group Key: "time" + -> Custom Scan (AsyncAppend) + Output: "time", (PARTIAL avg(temp)) + -> Merge Append + Sort Key: hyper."time" + -> Custom Scan (DataNodeScan) + Output: hyper."time", (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST + -> Custom 
Scan (DataNodeScan) + Output: hyper_1."time", (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST +(25 rows) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), (PARTIAL avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")) + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data 
node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST +(25 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: (time_bucket('@ 
2 days'::interval, "time")), device, (avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT date_trunc('month', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: (date_trunc('month'::text, "time")), device, (avg(temp)) + -> Merge Append + Sort Key: (date_trunc('month'::text, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC 
NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING device > 4 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) 
+ Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING avg(temp) > 40 AND max(temp) < 70 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + Sort Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, 
ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + +######### Grouping on device only (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: device, (avg(temp)) + -> Merge Append + Sort Key: hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(22 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT location, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND (temp * random() >= 0) +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper.location, avg(hyper.temp) + Group Key: hyper.location + -> Custom Scan (AsyncAppend) + Output: hyper.location, hyper.temp + -> Merge Append + Sort Key: hyper_1.location + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.location, hyper_1.temp + Filter: ((hyper_1.temp * random()) >= '0'::double precision) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= 
'2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.location, hyper_2.temp + Filter: ((hyper_2.temp * random()) >= '0'::double precision) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.location, hyper_3.temp + Filter: ((hyper_3.temp * random()) >= '0'::double precision) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST +(25 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp), sum(temp * (random() <= 1)::int) as sum +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, avg(hyper.temp), sum((hyper.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, hyper.temp + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_1."time"), hyper_1.device, hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_2."time"), hyper_2.device, hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 
days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_3."time"), hyper_3.device, hyper_3.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(28 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING avg(temp) * custom_sum(device) > 0.8 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp) + Group Key: hyper."time", hyper.device + Filter: ((avg(hyper.temp) * (custom_sum(hyper.device))::double precision) > '0.8'::double precision) + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(23 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp), custom_sum(device) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp), custom_sum(hyper.device) + Group Key: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### Constification and runtime push down of time-related functions + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, 
(avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, 
costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 5 +OFFSET 5 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 0 + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: 
db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT extract(year from date '2000-01-01') + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT greatest(random(), 10.0) + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, 
_dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) OVER (PARTITION BY device) +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + -> Sort + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + Sort Key: hyper."time", hyper.device + -> WindowAgg + Output: hyper."time", hyper.device, avg(hyper.temp) OVER (?) + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time", hyper.temp + -> Merge Append + Sort Key: hyper_1.device + -> Result + Output: hyper_1.device, hyper_1."time", hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_2.device, hyper_2."time", hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_3.device, hyper_3."time", hyper_3.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST +(32 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: 
hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device) device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: t."time" + -> Nested Loop + Output: t."time" + Join Filter: (t.device = join_test.device) + -> Custom Scan (AsyncAppend) + Output: t."time", t.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper t_1 + Output: t_1."time", t_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_2 + Output: t_2."time", t_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_3 + Output: t_3."time", t_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) + -> Materialize + Output: join_test.device + -> Seq Scan on public.join_test + Output: join_test.device +(27 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +WITH top_n AS ( + SELECT device, avg(temp) + FROM hyper + WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' + GROUP BY 1 + ORDER BY 2 DESC + LIMIT 10 +) +SELECT time_bucket('60s', time) AS "time", device, avg(temp) +FROM hyper INNER JOIN top_n USING (device) +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, hyper."time")), hyper.device, avg(hyper.temp) + Group Key: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device, hyper.temp + Inner Unique: true + Join Filter: (hyper.device = top_n.device) + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: (time_bucket('@ 1 min'::interval, hyper_1."time")), hyper_1.device + -> Result + Output: hyper_1."time", hyper_1.device, hyper_1.temp, time_bucket('@ 1 min'::interval, hyper_1."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM 
public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_2."time", hyper_2.device, hyper_2.temp, time_bucket('@ 1 min'::interval, hyper_2."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_3."time", hyper_3.device, hyper_3.temp, time_bucket('@ 1 min'::interval, hyper_3."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: top_n.device + -> Subquery Scan on top_n + Output: top_n.device + -> Limit + Output: device, (avg(temp)) + -> Sort + Output: device, (avg(temp)) + Sort Key: (avg(temp)) DESC + -> Custom Scan (AsyncAppend) + Output: device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper_4.device, (avg(hyper_4.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_5.device, (avg(hyper_5.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_6.device, (avg(hyper_6.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(62 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +SELECT time_bucket('60s', h1.time) AS "time", h1.device, avg(h1.temp), max(h2.temp) +FROM hyper h1 INNER JOIN hyper1d h2 ON (time_bucket('60', 
h1.time) = time_bucket('60', h2.time) AND h1.device = h2.device) +WHERE h1.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND + h2.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, h1."time")), h1.device, avg(h1.temp), max(h2.temp) + Group Key: time_bucket('@ 1 min'::interval, h1."time"), h1.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, h1."time"), h1.device, h1.temp, h2.temp + Join Filter: ((h1.device = h2.device) AND ((time_bucket('@ 1 min'::interval, h2."time")) = time_bucket('@ 1 min'::interval, h1."time"))) + -> Result + Output: h2.temp, h2."time", h2.device, time_bucket('@ 1 min'::interval, h2."time") + -> Custom Scan (DataNodeScan) on public.hyper1d h2 + Output: h2.temp, h2."time", h2.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: h1."time", h1.device, h1.temp + -> Custom Scan (AsyncAppend) + Output: h1."time", h1.device, h1.temp + -> Append + -> Custom Scan (DataNodeScan) on public.hyper h1_1 + Output: h1_1."time", h1_1.device, h1_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_2 + Output: h1_2."time", h1_2.device, h1_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_3 + Output: h1_3."time", h1_3.device, h1_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(33 rows) + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: EXPLAIN (verbose, costs off) +%%% WHERE_CLAUSE: 
:CLEAN_PARTITIONING_TIME_RANGE AND device = 1 +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + setseed +--------- + +(1 row) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: hyper."time", (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST +(6 rows) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST +(6 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp 
with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST +(6 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST +(6 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT date_trunc('month', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST +(6 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +HAVING device > 4 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------- + GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, avg(hyper.temp) + Group Key: (time_bucket('@ 2 days'::interval, hyper."time")) + -> Sort + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, hyper.temp + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")) + -> Result + Output: 
time_bucket('@ 2 days'::interval, hyper."time"), hyper.device, hyper.temp + One-Time Filter: false +(9 rows) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +HAVING avg(temp) > 40 AND max(temp) < 70 +ORDER BY 1,2 + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST +(6 rows) + + +######### Grouping on device only (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper.device, avg(hyper.temp) + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) +(7 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT location, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 AND (temp * random() >= 0) +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper.location, avg(hyper.temp) + Group Key: hyper.location + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper.location, hyper.temp + Filter: ((hyper.temp * random()) >= '0'::double precision) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) ORDER BY location ASC NULLS LAST +(9 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp), sum(temp * (random() <= 1)::int) as sum +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, avg(hyper.temp), sum((hyper.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper."time") + -> Result + Output: time_bucket('@ 2 days'::interval, hyper."time"), hyper.device, hyper.temp + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST +(10 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +HAVING avg(temp) * custom_sum(device) > 0.8 +ORDER BY 1,2 + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp) + Group Key: hyper."time" + Filter: ((avg(hyper.temp) * (custom_sum(hyper.device))::double precision) > '0.8'::double precision) + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) ORDER BY "time" ASC NULLS LAST +(9 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp), custom_sum(device) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp), custom_sum(hyper.device) + Group Key: hyper."time" + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) ORDER BY "time" ASC NULLS LAST +(8 rows) + + +######### Constification and runtime push down of time-related functions + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST +(6 rows) + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST +(6 rows) + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST +(6 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 5 +OFFSET 5 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, 
_dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 0 + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT extract(year from date '2000-01-01') + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT greatest(random(), 10.0) + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) OVER (PARTITION BY device) +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + -> Sort + Output: hyper."time", hyper.device, (avg(hyper.temp) 
OVER (?)) + Sort Key: hyper."time", hyper.device + -> WindowAgg + Output: hyper."time", hyper.device, avg(hyper.temp) OVER (?) + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time", hyper.temp + -> Merge Append + Sort Key: hyper_1.device + -> Result + Output: hyper_1.device, hyper_1."time", hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_2.device, hyper_2."time", hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_3.device, hyper_3."time", hyper_3.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST +(32 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: 
db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device) device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: t."time" + -> Nested Loop + Output: t."time" + Join Filter: (t.device = join_test.device) + -> Custom Scan (AsyncAppend) + Output: t."time", t.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper t_1 + Output: t_1."time", t_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_2 + Output: t_2."time", t_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk 
+ Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_3 + Output: t_3."time", t_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) + -> Materialize + Output: join_test.device + -> Seq Scan on public.join_test + Output: join_test.device +(27 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +WITH top_n AS ( + SELECT device, avg(temp) + FROM hyper + WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 + GROUP BY 1 + ORDER BY 2 DESC + LIMIT 10 +) +SELECT time_bucket('60s', time) AS "time", device, avg(temp) +FROM hyper INNER JOIN top_n USING (device) +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND device = 1 +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, hyper."time")), hyper.device, avg(hyper.temp) + Group Key: (time_bucket('@ 1 min'::interval, hyper."time")) + -> Nested Loop + Output: (time_bucket('@ 1 min'::interval, hyper."time")), hyper.device, hyper.temp + -> Result + Output: hyper."time", hyper.device, hyper.temp, time_bucket('@ 1 min'::interval, hyper."time") + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST + -> Materialize + Output: top_n.device + -> Subquery Scan on top_n + Output: top_n.device + Filter: (top_n.device = 1) + -> Limit + Output: hyper_1.device, (avg(hyper_1.temp)) + -> Custom Scan (DataNodeScan) + Output: hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device = 1)) GROUP BY 1 ORDER BY avg(temp) DESC NULLS FIRST +(25 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +SELECT time_bucket('60s', h1.time) AS "time", h1.device, avg(h1.temp), max(h2.temp) +FROM hyper h1 INNER JOIN hyper1d h2 ON (time_bucket('60', h1.time) = time_bucket('60', h2.time) AND h1.device = h2.device) +WHERE h1.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND + h2.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, h1."time")), h1.device, avg(h1.temp), max(h2.temp) + Group Key: time_bucket('@ 1 min'::interval, h1."time"), h1.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, h1."time"), h1.device, h1.temp, h2.temp + Join Filter: ((h1.device = h2.device) AND ((time_bucket('@ 1 min'::interval, h2."time")) = time_bucket('@ 1 min'::interval, h1."time"))) + -> Result + Output: h2.temp, h2."time", h2.device, time_bucket('@ 1 min'::interval, h2."time") + -> Custom Scan (DataNodeScan) on public.hyper1d h2 + Output: h2.temp, h2."time", h2.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: h1."time", h1.device, h1.temp + -> Custom Scan (AsyncAppend) + Output: h1."time", h1.device, h1.temp + -> Append + -> Custom Scan (DataNodeScan) on public.hyper h1_1 + Output: h1_1."time", h1_1.device, h1_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_2 + Output: h1_2."time", h1_2.device, h1_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_3 + Output: h1_3."time", h1_3.device, h1_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(33 rows) + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: EXPLAIN (verbose, costs off) +%%% WHERE_CLAUSE: :CLEAN_PARTITIONING_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: LIMIT 10 +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + setseed +--------- + +(1 row) + + 
+######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 +ORDER BY 1 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: "time", (avg(temp)) + -> Finalize GroupAggregate + Output: "time", avg(temp) + Group Key: "time" + -> Custom Scan (AsyncAppend) + Output: "time", (PARTIAL avg(temp)) + -> Merge Append + Sort Key: hyper."time" + -> Custom Scan (DataNodeScan) + Output: hyper."time", (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST +(27 rows) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 +ORDER BY 1 +LIMIT 10 + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: (time_bucket('@ 2 days'::interval, "time")), (avg(temp)) + -> Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), (PARTIAL avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")) + -> Custom Scan 
(DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST +(27 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: "time", device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, 
ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 
15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT date_trunc('month', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: (date_trunc('month'::text, "time")), device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: (date_trunc('month'::text, "time")), device, (avg(temp)) + -> Merge Append + Sort Key: (date_trunc('month'::text, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING device > 4 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING avg(temp) > 40 AND max(temp) < 70 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + 
Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Sort + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + Sort Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 HAVING ((avg(temp) > 40::double precision)) AND ((max(temp) < 70::double precision)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(26 rows) + + +######### Grouping on device only (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1 +ORDER BY 1 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: device, (avg(temp)) + -> Merge Append + Sort Key: hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND 
(("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(24 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT location, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND (temp * random() >= 0) +GROUP BY 1 +ORDER BY 1 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.location, (avg(hyper.temp)) + -> GroupAggregate + Output: hyper.location, avg(hyper.temp) + Group Key: hyper.location + -> Custom Scan (AsyncAppend) + Output: hyper.location, hyper.temp + -> Merge Append + Sort Key: hyper_1.location + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.location, hyper_1.temp + Filter: ((hyper_1.temp * random()) >= '0'::double precision) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.location, hyper_2.temp + Filter: ((hyper_2.temp * random()) >= '0'::double precision) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.location, hyper_3.temp + Filter: ((hyper_3.temp * random()) >= '0'::double precision) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST +(27 rows) + + +######### No push down on some 
functions + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp), sum(temp * (random() <= 1)::int) as sum +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (avg(hyper.temp)), (sum((hyper.temp * (((random() <= '1'::double precision))::integer)::double precision))) + -> GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, avg(hyper.temp), sum((hyper.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, hyper.temp + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_1."time"), hyper_1.device, hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_2."time"), hyper_2.device, hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_3."time"), hyper_3.device, hyper_3.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(30 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +HAVING avg(temp) * custom_sum(device) > 0.8 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: hyper."time", hyper.device, (avg(hyper.temp)) + -> GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp) + Group Key: hyper."time", hyper.device + Filter: ((avg(hyper.temp) * (custom_sum(hyper.device))::double precision) > '0.8'::double precision) + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(25 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp), custom_sum(device) +FROM hyper +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: hyper."time", hyper.device, (avg(hyper.temp)), (custom_sum(hyper.device)) + -> GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp), custom_sum(hyper.device) + Group Key: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 
00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + +######### Constification and runtime push down of time-related functions + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: "time", device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: "time", device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: "time", device, (avg(temp)) + -> Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(24 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 5 +OFFSET 5 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, 
_dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 0 + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT extract(year from date '2000-01-01') + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT greatest(random(), 10.0) + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, 
_dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) OVER (PARTITION BY device) +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + -> Sort + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + Sort Key: hyper."time", hyper.device + -> WindowAgg + Output: hyper."time", hyper.device, avg(hyper.temp) OVER (?) + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time", hyper.temp + -> Merge Append + Sort Key: hyper_1.device + -> Result + Output: hyper_1.device, hyper_1."time", hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_2.device, hyper_2."time", hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_3.device, hyper_3."time", hyper_3.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST +(32 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT "time", 
device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device) device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: t."time" + -> Nested Loop + Output: t."time" + Join Filter: (t.device = join_test.device) + -> Custom 
Scan (AsyncAppend) + Output: t."time", t.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper t_1 + Output: t_1."time", t_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_2 + Output: t_2."time", t_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_3 + Output: t_3."time", t_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) + -> Materialize + Output: join_test.device + -> Seq Scan on public.join_test + Output: join_test.device +(27 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +WITH top_n AS ( + SELECT device, avg(temp) + FROM hyper + WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' + GROUP BY 1 + ORDER BY 2 DESC + LIMIT 10 +) +SELECT time_bucket('60s', time) AS "time", device, avg(temp) +FROM hyper INNER JOIN top_n USING (device) +WHERE time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, hyper."time")), hyper.device, avg(hyper.temp) + Group Key: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device, hyper.temp + Inner Unique: true + Join Filter: (hyper.device = top_n.device) + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: (time_bucket('@ 1 min'::interval, hyper_1."time")), hyper_1.device + -> Result + Output: hyper_1."time", hyper_1.device, hyper_1.temp, time_bucket('@ 1 min'::interval, hyper_1."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_2."time", hyper_2.device, hyper_2.temp, time_bucket('@ 1 min'::interval, hyper_2."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: 
hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_3."time", hyper_3.device, hyper_3.temp, time_bucket('@ 1 min'::interval, hyper_3."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: top_n.device + -> Subquery Scan on top_n + Output: top_n.device + -> Limit + Output: device, (avg(temp)) + -> Sort + Output: device, (avg(temp)) + Sort Key: (avg(temp)) DESC + -> Custom Scan (AsyncAppend) + Output: device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper_4.device, (avg(hyper_4.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_5.device, (avg(hyper_5.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_6.device, (avg(hyper_6.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(62 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +SELECT time_bucket('60s', h1.time) AS "time", h1.device, avg(h1.temp), max(h2.temp) +FROM hyper h1 INNER JOIN hyper1d h2 ON (time_bucket('60', h1.time) = time_bucket('60', h2.time) AND h1.device = h2.device) +WHERE h1.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND + h2.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, h1."time")), h1.device, avg(h1.temp), max(h2.temp) + Group Key: time_bucket('@ 1 min'::interval, h1."time"), h1.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, h1."time"), h1.device, h1.temp, h2.temp + Join Filter: ((h1.device = h2.device) AND ((time_bucket('@ 1 min'::interval, h2."time")) = time_bucket('@ 1 min'::interval, h1."time"))) + -> Result + Output: h2.temp, h2."time", h2.device, time_bucket('@ 1 min'::interval, h2."time") + -> Custom Scan (DataNodeScan) on public.hyper1d h2 + Output: h2.temp, h2."time", h2.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: h1."time", h1.device, h1.temp + -> Custom Scan (AsyncAppend) + Output: h1."time", h1.device, h1.temp + -> Append + -> Custom Scan (DataNodeScan) on public.hyper h1_1 + Output: h1_1."time", h1_1.device, h1_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_2 + Output: h1_2."time", h1_2.device, h1_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_3 + Output: h1_3."time", h1_3.device, h1_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(33 rows) + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: EXPLAIN (verbose, costs off) +%%% WHERE_CLAUSE: :REPARTITIONED_TIME_RANGE +%%% ORDER_BY_1: +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + setseed +--------- + +(1 row) + + +######### Grouping on time only 
(partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1 + + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: "time", avg(temp) + Group Key: "time" + -> Sort + Output: "time", (PARTIAL avg(temp)) + Sort Key: "time" + -> Custom Scan (AsyncAppend) + Output: "time", (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper."time", (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 +(27 rows) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1 + + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")) + -> Sort + Output: (time_bucket('@ 2 days'::interval, "time")), (PARTIAL avg(temp)) + Sort Key: (time_bucket('@ 2 days'::interval, "time")) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, 
_dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 +(27 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS 
LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), device, avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), 
device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(25 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT date_trunc('month', time) AS time, device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (date_trunc('month'::text, "time")), device, avg(temp) + Group Key: (date_trunc('month'::text, "time")), device + -> Custom Scan (AsyncAppend) + Output: (date_trunc('month'::text, "time")), device, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: (date_trunc('month'::text, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper."time")), hyper.device, (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_1."time")), hyper_1.device, (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper_2."time")), hyper_2.device, (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY date_trunc('month'::text, "time") ASC NULLS LAST, device ASC NULLS LAST +(25 rows) + + +######### Grouping on time and device (full 
aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +HAVING device > 4 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), device, avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(25 rows) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +HAVING avg(temp) > 40 AND 
max(temp) < 70 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), device, avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")), device + Filter: ((avg(temp) > '40'::double precision) AND (max(temp) < '70'::double precision)) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)), (PARTIAL max(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, (PARTIAL avg(hyper.temp)), (PARTIAL max(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)), _timescaledb_functions.partialize_agg(max(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, (PARTIAL avg(hyper_1.temp)), (PARTIAL max(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)), _timescaledb_functions.partialize_agg(max(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, (PARTIAL avg(hyper_2.temp)), (PARTIAL max(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)), _timescaledb_functions.partialize_agg(max(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(26 rows) + + +######### Grouping 
on device only (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1 + + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: device, avg(temp) + Group Key: device + -> Sort + Output: device, (PARTIAL avg(temp)) + Sort Key: device + -> Custom Scan (AsyncAppend) + Output: device, (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper.device, (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: hyper_1.device, (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 + -> Custom Scan (DataNodeScan) + Output: hyper_2.device, (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 +(27 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT location, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' AND (temp * random() >= 0) +GROUP BY 1 + + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Finalize GroupAggregate + Output: location, avg(temp) + Group Key: location + -> Custom Scan (AsyncAppend) + Output: location, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: hyper.location + -> Partial GroupAggregate + Output: hyper.location, PARTIAL avg(hyper.temp) + Group Key: hyper.location + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper.location, hyper.temp + Filter: ((hyper.temp * random()) >= '0'::double precision) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE 
_timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Partial GroupAggregate + Output: hyper_1.location, PARTIAL avg(hyper_1.temp) + Group Key: hyper_1.location + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.location, hyper_1.temp + Filter: ((hyper_1.temp * random()) >= '0'::double precision) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Partial GroupAggregate + Output: hyper_2.location, PARTIAL avg(hyper_2.temp) + Group Key: hyper_2.location + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.location, hyper_2.temp + Filter: ((hyper_2.temp * random()) >= '0'::double precision) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT location, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST +(34 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp), sum(temp * (random() <= 1)::int) as sum +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, avg(hyper.temp), sum((hyper.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device + -> Partial GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper."time")), hyper.device, PARTIAL avg(hyper.temp), PARTIAL sum((hyper.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper."time"), hyper.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper."time"), hyper.device, hyper.temp + -> Custom Scan (DataNodeScan) on public.hyper + Output: hyper."time", hyper.device, hyper.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Partial GroupAggregate + Output: 
(time_bucket('@ 2 days'::interval, hyper_1."time")), hyper_1.device, PARTIAL avg(hyper_1.temp), PARTIAL sum((hyper_1.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper_1."time"), hyper_1.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_1."time"), hyper_1.device, hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Partial GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper_2."time")), hyper_2.device, PARTIAL avg(hyper_2.temp), PARTIAL sum((hyper_2.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper_2."time"), hyper_2.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper_2."time"), hyper_2.device, hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(35 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +HAVING avg(temp) * custom_sum(device) > 0.8 +ORDER BY 1,2 + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp) + Group Key: hyper."time", hyper.device + Filter: ((avg(hyper.temp) * (custom_sum(hyper.device))::double precision) > '0.8'::double precision) + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: 
db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(23 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp), custom_sum(device) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 +ORDER BY 1,2 + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: hyper."time", hyper.device, avg(hyper.temp), custom_sum(hyper.device) + Group Key: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### Constification and runtime push down of time-related functions + + tsl_override_current_timestamptz 
+---------------------------------- + +(1 row) + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, 
_dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Merge Append + Sort Key: hyper."time", hyper.device + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(22 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 5 +OFFSET 5 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, 
_dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 10 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT 0 + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 1 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT extract(year from date '2000-01-01') + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, 
_dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST LIMIT 2000 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper +ORDER BY 1,2 +LIMIT greatest(random(), 10.0) + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: hyper."time", hyper.device + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device + -> Merge Append + Sort Key: hyper_1."time", hyper_1.device + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) OVER (PARTITION BY device) +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + -> Sort + Output: hyper."time", hyper.device, (avg(hyper.temp) OVER (?)) + Sort Key: hyper."time", hyper.device + -> WindowAgg + Output: hyper."time", hyper.device, avg(hyper.temp) OVER (?) 
+ -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time", hyper.temp + -> Merge Append + Sort Key: hyper_1.device + -> Result + Output: hyper_1.device, hyper_1."time", hyper_1.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_2.device, hyper_2."time", hyper_2.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper_3.device, hyper_3."time", hyper_3.temp + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST +(32 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote 
SQL: SELECT DISTINCT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device) device, time +FROM hyper +ORDER BY 1,2 +LIMIT 10 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper.device, hyper."time" + -> Unique + Output: hyper.device, hyper."time" + -> Custom Scan (AsyncAppend) + Output: hyper.device, hyper."time" + -> Merge Append + Sort Key: hyper_1.device, hyper_1."time" + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1.device, hyper_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2.device, hyper_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3.device, hyper_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: t."time" + -> Nested Loop + Output: t."time" + Join Filter: (t.device = join_test.device) + -> Custom Scan (AsyncAppend) + Output: t."time", t.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper t_1 + Output: t_1."time", t_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_2 + Output: t_2."time", t_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 
5, 6, 7]) + -> Custom Scan (DataNodeScan) on public.hyper t_3 + Output: t_3."time", t_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) + -> Materialize + Output: join_test.device + -> Seq Scan on public.join_test + Output: join_test.device +(27 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +WITH top_n AS ( + SELECT device, avg(temp) + FROM hyper + WHERE time >= '2019-01-01' + GROUP BY 1 + ORDER BY 2 DESC + LIMIT 10 +) +SELECT time_bucket('60s', time) AS "time", device, avg(temp) +FROM hyper INNER JOIN top_n USING (device) +WHERE time >= '2019-01-01' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, hyper."time")), hyper.device, avg(hyper.temp) + Group Key: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, hyper."time"), hyper.device, hyper.temp + Inner Unique: true + Join Filter: (hyper.device = top_n.device) + -> Custom Scan (AsyncAppend) + Output: hyper."time", hyper.device, hyper.temp + -> Merge Append + Sort Key: (time_bucket('@ 1 min'::interval, hyper_1."time")), hyper_1.device + -> Result + Output: hyper_1."time", hyper_1.device, hyper_1.temp, time_bucket('@ 1 min'::interval, hyper_1."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_1 + Output: hyper_1."time", hyper_1.device, hyper_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_2."time", hyper_2.device, hyper_2.temp, time_bucket('@ 1 min'::interval, hyper_2."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_2 + Output: hyper_2."time", hyper_2.device, hyper_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper_3."time", hyper_3.device, hyper_3.temp, time_bucket('@ 1 min'::interval, hyper_3."time") + -> Custom Scan (DataNodeScan) on public.hyper hyper_3 + Output: hyper_3."time", hyper_3.device, hyper_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + 
Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: top_n.device + -> Subquery Scan on top_n + Output: top_n.device + -> Limit + Output: device, (avg(temp)) + -> Sort + Output: device, (avg(temp)) + Sort Key: (avg(temp)) DESC + -> Finalize GroupAggregate + Output: device, avg(temp) + Group Key: device + -> Custom Scan (AsyncAppend) + Output: device, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: hyper_4.device + -> Custom Scan (DataNodeScan) + Output: hyper_4.device, (PARTIAL avg(hyper_4.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_5.device, (PARTIAL avg(hyper_5.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_6.device, (PARTIAL avg(hyper_6.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(66 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +SELECT time_bucket('60s', h1.time) AS "time", h1.device, avg(h1.temp), max(h2.temp) +FROM hyper h1 INNER JOIN hyper1d h2 ON (time_bucket('60', h1.time) = time_bucket('60', h2.time) AND h1.device = h2.device) +WHERE h1.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND + h2.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, h1."time")), h1.device, avg(h1.temp), max(h2.temp) + Group Key: time_bucket('@ 1 min'::interval, h1."time"), h1.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, h1."time"), 
h1.device, h1.temp, h2.temp + Join Filter: ((h1.device = h2.device) AND ((time_bucket('@ 1 min'::interval, h2."time")) = time_bucket('@ 1 min'::interval, h1."time"))) + -> Result + Output: h2.temp, h2."time", h2.device, time_bucket('@ 1 min'::interval, h2."time") + -> Custom Scan (DataNodeScan) on public.hyper1d h2 + Output: h2.temp, h2."time", h2.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: h1."time", h1.device, h1.temp + -> Custom Scan (AsyncAppend) + Output: h1."time", h1.device, h1.temp + -> Append + -> Custom Scan (DataNodeScan) on public.hyper h1_1 + Output: h1_1."time", h1_1.device, h1_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_2 + Output: h1_2."time", h1_2.device, h1_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_3 + Output: h1_3."time", h1_3.device, h1_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(33 rows) + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper1d +%%% PREFIX: EXPLAIN (verbose, costs off) +%%% WHERE_CLAUSE: :REPARTITIONED_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + setseed +--------- + +(1 row) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", (avg(temp)) + -> Merge Append + Sort Key: hyper1d."time" + -> Custom Scan (DataNodeScan) + Output: hyper1d."time", (avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data 
node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper1d_1."time", (avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper1d_2."time", (avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY "time" ASC NULLS LAST +(22 rows) + + +######### Grouping on time only (partial aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), (PARTIAL avg(temp)) + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper1d."time")) + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d."time")), (PARTIAL avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_1."time")), (PARTIAL avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_2."time")), (PARTIAL avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT public.time_bucket('@ 2 
days'::interval, "time"), _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST +(25 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1,2 + + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper."time", hyper.device, (avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_1."time", hyper_1.device, (avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper_2."time", hyper_2.device, (avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1,2 + + + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), device, avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Sort + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)) + Sort Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, 
hyper1d."time")), hyper1d.device, (PARTIAL avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_1."time")), hyper1d_1.device, (PARTIAL avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_2."time")), hyper1d_2.device, (PARTIAL avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(27 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT date_trunc('month', time) AS time, device, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1,2 + + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (date_trunc('month'::text, "time")), device, avg(temp) + Group Key: (date_trunc('month'::text, "time")), device + -> Sort + Output: (date_trunc('month'::text, "time")), device, (PARTIAL avg(temp)) + Sort Key: (date_trunc('month'::text, "time")), device + -> Custom Scan (AsyncAppend) + Output: (date_trunc('month'::text, "time")), device, (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper1d."time")), hyper1d.device, (PARTIAL avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper1d_1."time")), hyper1d_1.device, (PARTIAL avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP 
BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (date_trunc('month'::text, hyper1d_2."time")), hyper1d_2.device, (PARTIAL avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT date_trunc('month'::text, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(27 rows) + + +######### Grouping on time and device (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1,2 +HAVING device > 4 + + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), device, avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Sort + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)) + Sort Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d."time")), hyper1d.device, (PARTIAL avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_1."time")), hyper1d_1.device, (PARTIAL avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_2."time")), hyper1d_2.device, (PARTIAL avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND ((device > 4)) GROUP BY 1, 2 +(27 rows) + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1,2 +HAVING avg(temp) > 40 AND max(temp) < 70 + + + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, "time")), device, avg(temp) + Group Key: (time_bucket('@ 2 days'::interval, "time")), device + Filter: ((avg(temp) > '40'::double precision) AND (max(temp) < '70'::double precision)) + -> Sort + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)), (PARTIAL max(temp)) + Sort Key: (time_bucket('@ 2 days'::interval, "time")), device + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 2 days'::interval, "time")), device, (PARTIAL avg(temp)), (PARTIAL max(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d."time")), hyper1d.device, (PARTIAL avg(hyper1d.temp)), (PARTIAL max(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)), _timescaledb_functions.partialize_agg(max(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_1."time")), hyper1d_1.device, (PARTIAL avg(hyper1d_1.temp)), (PARTIAL max(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)), _timescaledb_functions.partialize_agg(max(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: (time_bucket('@ 2 days'::interval, hyper1d_2."time")), hyper1d_2.device, (PARTIAL avg(hyper1d_2.temp)), (PARTIAL max(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT public.time_bucket('@ 2 days'::interval, "time"), device, _timescaledb_functions.partialize_agg(avg(temp)), _timescaledb_functions.partialize_agg(max(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(28 rows) + + +######### Grouping on device only (full aggregation) + +EXPLAIN (verbose, costs off) +SELECT device, avg(temp) +FROM hyper +WHERE time >= '2019-01-01' +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: device, avg(temp) + Group Key: device + -> Custom Scan (AsyncAppend) + Output: device, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: hyper.device + -> Custom Scan 
(DataNodeScan) + Output: hyper.device, (PARTIAL avg(hyper.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_17_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_1.device, (PARTIAL avg(hyper_1.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_18_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4, 5, 6, 7]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper_2.device, (PARTIAL avg(hyper_2.temp)) + Relations: Aggregate on (public.hyper) + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_16_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1, 2, 3, 4]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(25 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT location, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' AND (temp * random() >= 0) +GROUP BY 1 +ORDER BY 1 + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: location, avg(temp) + Group Key: location + -> Custom Scan (AsyncAppend) + Output: location, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: hyper1d.location + -> Partial GroupAggregate + Output: hyper1d.location, PARTIAL avg(hyper1d.temp) + Group Key: hyper1d.location + -> Custom Scan (DataNodeScan) on public.hyper1d + Output: hyper1d.location, hyper1d.temp + Filter: ((hyper1d.temp * random()) >= '0'::double precision) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT location, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST + -> Partial GroupAggregate + Output: hyper1d_1.location, PARTIAL avg(hyper1d_1.temp) + Group Key: hyper1d_1.location + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1.location, hyper1d_1.temp + Filter: ((hyper1d_1.temp * random()) >= '0'::double precision) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT location, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS 
LAST + -> Partial GroupAggregate + Output: hyper1d_2.location, PARTIAL avg(hyper1d_2.temp) + Group Key: hyper1d_2.location + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2.location, hyper1d_2.temp + Filter: ((hyper1d_2.temp * random()) >= '0'::double precision) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT location, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY location ASC NULLS LAST +(34 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time_bucket('2 days', time) AS time, device, avg(temp), sum(temp * (random() <= 1)::int) as sum +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1,2 + + + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper1d."time")), hyper1d.device, avg(hyper1d.temp), sum((hyper1d.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: (time_bucket('@ 2 days'::interval, hyper1d."time")), hyper1d.device + -> Merge Append + Sort Key: (time_bucket('@ 2 days'::interval, hyper1d."time")), hyper1d.device + -> Partial GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper1d."time")), hyper1d.device, PARTIAL avg(hyper1d.temp), PARTIAL sum((hyper1d.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper1d."time"), hyper1d.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper1d."time"), hyper1d.device, hyper1d.temp + -> Custom Scan (DataNodeScan) on public.hyper1d + Output: hyper1d."time", hyper1d.device, hyper1d.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Partial GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper1d_1."time")), hyper1d_1.device, PARTIAL avg(hyper1d_1.temp), PARTIAL sum((hyper1d_1.temp * (((random() <= '1'::double precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper1d_1."time"), hyper1d_1.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper1d_1."time"), hyper1d_1.device, hyper1d_1.temp + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device, hyper1d_1.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Partial GroupAggregate + Output: (time_bucket('@ 2 days'::interval, hyper1d_2."time")), hyper1d_2.device, PARTIAL avg(hyper1d_2.temp), PARTIAL sum((hyper1d_2.temp * (((random() <= '1'::double 
precision))::integer)::double precision)) + Group Key: time_bucket('@ 2 days'::interval, hyper1d_2."time"), hyper1d_2.device + -> Result + Output: time_bucket('@ 2 days'::interval, hyper1d_2."time"), hyper1d_2.device, hyper1d_2.temp + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device, hyper1d_2.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('2 days'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(35 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1,2 +HAVING avg(temp) * custom_sum(device) > 0.8 + + + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> GroupAggregate + Output: hyper1d."time", hyper1d.device, avg(hyper1d.temp) + Group Key: hyper1d."time", hyper1d.device + Filter: ((avg(hyper1d.temp) * (custom_sum(hyper1d.device))::double precision) > '0.8'::double precision) + -> Custom Scan (DataNodeScan) on public.hyper1d + Output: hyper1d."time", hyper1d.device, hyper1d.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper1d_1."time", hyper1d_1.device, avg(hyper1d_1.temp) + Group Key: hyper1d_1."time", hyper1d_1.device + Filter: ((avg(hyper1d_1.temp) * (custom_sum(hyper1d_1.device))::double precision) > '0.8'::double precision) + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device, hyper1d_1.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper1d_2."time", hyper1d_2.device, avg(hyper1d_2.temp) + Group Key: hyper1d_2."time", hyper1d_2.device + Filter: ((avg(hyper1d_2.temp) * (custom_sum(hyper1d_2.device))::double precision) > '0.8'::double precision) + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device, hyper1d_2.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(30 rows) + + +######### No push down on some functions + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp), custom_sum(device) +FROM hyper1d +WHERE time >= '2019-01-01' +GROUP BY 1,2 + + + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)), (custom_sum(device)) + -> Append + -> GroupAggregate + Output: hyper1d."time", hyper1d.device, avg(hyper1d.temp), custom_sum(hyper1d.device) + Group Key: hyper1d."time", hyper1d.device + -> Custom Scan (DataNodeScan) on public.hyper1d + Output: hyper1d."time", hyper1d.device, hyper1d.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper1d_1."time", hyper1d_1.device, avg(hyper1d_1.temp), custom_sum(hyper1d_1.device) + Group Key: hyper1d_1."time", hyper1d_1.device + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device, hyper1d_1.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST + -> GroupAggregate + Output: hyper1d_2."time", hyper1d_2.device, avg(hyper1d_2.temp), custom_sum(hyper1d_2.device) + Group Key: hyper1d_2."time", hyper1d_2.device + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device, hyper1d_2.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY "time" ASC NULLS LAST, device ASC NULLS LAST +(27 rows) + + +######### Constification and runtime push down of time-related functions + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper1d."time", hyper1d.device, (avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper1d_1."time", hyper1d_1.device, (avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper1d_2."time", 
hyper1d_2.device, (avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper1d."time", hyper1d.device, (avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper1d_1."time", hyper1d_1.device, (avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper1d_2."time", hyper1d_2.device, (avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + tsl_override_current_timestamptz +---------------------------------- + +(1 row) + + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) + Output: "time", device, (avg(temp)) + -> Append + -> Custom Scan (DataNodeScan) + Output: hyper1d."time", hyper1d.device, (avg(hyper1d.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper1d_1."time", hyper1d_1.device, (avg(hyper1d_1.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 + -> Custom Scan (DataNodeScan) + Output: hyper1d_2."time", hyper1d_2.device, (avg(hyper1d_2.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, avg(temp) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) 
AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1, 2 +(21 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper1d + +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper1d."time", hyper1d.device + -> Custom Scan (AsyncAppend) + Output: hyper1d."time", hyper1d.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3."time", hyper1d_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) LIMIT 10 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper1d + +LIMIT 5 +OFFSET 5 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper1d."time", hyper1d.device + -> Custom Scan (AsyncAppend) + Output: hyper1d."time", hyper1d.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 10 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3."time", hyper1d_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) LIMIT 10 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper1d + +LIMIT 0 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: hyper1d."time", hyper1d.device + -> Custom Scan (AsyncAppend) + Output: hyper1d."time", hyper1d.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", 
hyper1d_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 1 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3."time", hyper1d_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) LIMIT 1 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper1d + +LIMIT extract(year from date '2000-01-01') + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper1d."time", hyper1d.device + -> Custom Scan (AsyncAppend) + Output: hyper1d."time", hyper1d.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) LIMIT 2000 + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3."time", hyper1d_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) LIMIT 2000 +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device +FROM hyper1d + +LIMIT greatest(random(), 10.0) + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper1d."time", hyper1d.device + -> Custom Scan (AsyncAppend) + Output: hyper1d."time", hyper1d.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3."time", hyper1d_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) +(20 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT time, device, avg(temp) OVER (PARTITION BY device) +FROM hyper1d + +LIMIT 10 + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
Limit + Output: hyper1d."time", hyper1d.device, (avg(hyper1d.temp) OVER (?)) + -> WindowAgg + Output: hyper1d."time", hyper1d.device, avg(hyper1d.temp) OVER (?) + -> Custom Scan (AsyncAppend) + Output: hyper1d.device, hyper1d."time", hyper1d.temp + -> Merge Append + Sort Key: hyper1d_1.device + -> Result + Output: hyper1d_1.device, hyper1d_1."time", hyper1d_1.temp + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device, hyper1d_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper1d_2.device, hyper1d_2."time", hyper1d_2.temp + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device, hyper1d_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) ORDER BY device ASC NULLS LAST + -> Result + Output: hyper1d_3.device, hyper1d_3."time", hyper1d_3.temp + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3."time", hyper1d_3.device, hyper1d_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) ORDER BY device ASC NULLS LAST +(29 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT device, time +FROM hyper1d + +LIMIT 10 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper1d.device, hyper1d."time" + -> Unique + Output: hyper1d.device, hyper1d."time" + -> Custom Scan (AsyncAppend) + Output: hyper1d.device, hyper1d."time" + -> Merge Append + Sort Key: hyper1d_1.device, hyper1d_1."time" + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1.device, hyper1d_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2.device, hyper1d_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3.device, hyper1d_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT DISTINCT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) ORDER BY device ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device) device, time +FROM hyper1d + +LIMIT 10 + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: hyper1d.device, hyper1d."time" + -> Unique + Output: hyper1d.device, hyper1d."time" + -> Custom Scan (AsyncAppend) + Output: hyper1d.device, hyper1d."time" + -> Merge Append + Sort Key: hyper1d_1.device + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1.device, hyper1d_1."time" + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2.device, hyper1d_2."time" + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3.device, hyper1d_3."time" + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT DISTINCT ON (device) "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) ORDER BY device ASC NULLS LAST +(23 rows) + + +######### LIMIT push down cases + + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: t."time" + -> Nested Loop + Output: t."time" + Join Filter: (t.device = join_test.device) + -> Custom Scan (AsyncAppend) + Output: t."time", t.device + -> Append + -> Custom Scan (DataNodeScan) on public.hyper1d t_1 + Output: t_1."time", t_1.device + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) + -> Custom Scan (DataNodeScan) on public.hyper1d t_2 + Output: t_2."time", t_2.device + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) + -> Custom Scan (DataNodeScan) on public.hyper1d t_3 + Output: t_3."time", t_3.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) + -> Materialize + Output: join_test.device + -> Seq Scan on public.join_test + Output: join_test.device +(27 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +WITH top_n AS ( + SELECT device, avg(temp) + FROM hyper1d + WHERE time >= '2019-01-01' + GROUP BY 1 + ORDER BY 2 DESC + LIMIT 10 +) +SELECT time_bucket('60s', time) AS "time", device, avg(temp) +FROM hyper1d INNER JOIN top_n USING (device) +WHERE time >= '2019-01-01' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, 
hyper1d."time")), hyper1d.device, avg(hyper1d.temp) + Group Key: time_bucket('@ 1 min'::interval, hyper1d."time"), hyper1d.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, hyper1d."time"), hyper1d.device, hyper1d.temp + Inner Unique: true + Join Filter: (hyper1d.device = top_n.device) + -> Custom Scan (AsyncAppend) + Output: hyper1d."time", hyper1d.device, hyper1d.temp + -> Merge Append + Sort Key: (time_bucket('@ 1 min'::interval, hyper1d_1."time")), hyper1d_1.device + -> Result + Output: hyper1d_1."time", hyper1d_1.device, hyper1d_1.temp, time_bucket('@ 1 min'::interval, hyper1d_1."time") + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_1 + Output: hyper1d_1."time", hyper1d_1.device, hyper1d_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper1d_2."time", hyper1d_2.device, hyper1d_2.temp, time_bucket('@ 1 min'::interval, hyper1d_2."time") + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_2 + Output: hyper1d_2."time", hyper1d_2.device, hyper1d_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Result + Output: hyper1d_3."time", hyper1d_3.device, hyper1d_3.temp, time_bucket('@ 1 min'::interval, hyper1d_3."time") + -> Custom Scan (DataNodeScan) on public.hyper1d hyper1d_3 + Output: hyper1d_3."time", hyper1d_3.device, hyper1d_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: top_n.device + -> Subquery Scan on top_n + Output: top_n.device + -> Limit + Output: device, (avg(temp)) + -> Sort + Output: device, (avg(temp)) + Sort Key: (avg(temp)) DESC + -> Finalize GroupAggregate + Output: device, avg(temp) + Group Key: device + -> Custom Scan (AsyncAppend) + Output: device, (PARTIAL avg(temp)) + -> Merge Append + Sort Key: hyper1d_4.device + -> Custom Scan (DataNodeScan) + Output: hyper1d_4.device, (PARTIAL avg(hyper1d_4.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_1 + Chunks: _dist_hyper_2_20_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper1d_5.device, (PARTIAL avg(hyper1d_5.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_2 + Chunks: _dist_hyper_2_21_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[8]) AND (("time" >= '2019-01-01 
00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST + -> Custom Scan (DataNodeScan) + Output: hyper1d_6.device, (PARTIAL avg(hyper1d_6.temp)) + Relations: Aggregate on (public.hyper1d) + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT device, _timescaledb_functions.partialize_agg(avg(temp)) FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) GROUP BY 1 ORDER BY device ASC NULLS LAST +(66 rows) + + +######### CTEs/Sub-queries + +EXPLAIN (verbose, costs off) +SELECT time_bucket('60s', h1.time) AS "time", h1.device, avg(h1.temp), max(h2.temp) +FROM hyper h1 INNER JOIN hyper1d h2 ON (time_bucket('60', h1.time) = time_bucket('60', h2.time) AND h1.device = h2.device) +WHERE h1.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' AND + h2.time BETWEEN '2019-01-01' AND '2019-01-01 15:00' +GROUP BY 1,2 +ORDER BY 1,2 + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: (time_bucket('@ 1 min'::interval, h1."time")), h1.device, avg(h1.temp), max(h2.temp) + Group Key: time_bucket('@ 1 min'::interval, h1."time"), h1.device + -> Nested Loop + Output: time_bucket('@ 1 min'::interval, h1."time"), h1.device, h1.temp, h2.temp + Join Filter: ((h1.device = h2.device) AND ((time_bucket('@ 1 min'::interval, h2."time")) = time_bucket('@ 1 min'::interval, h1."time"))) + -> Result + Output: h2.temp, h2."time", h2.device, time_bucket('@ 1 min'::interval, h2."time") + -> Custom Scan (DataNodeScan) on public.hyper1d h2 + Output: h2.temp, h2."time", h2.device + Data node: db_dist_query_3 + Chunks: _dist_hyper_2_19_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper1d WHERE _timescaledb_functions.chunks_in(public.hyper1d.*, ARRAY[5]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Materialize + Output: h1."time", h1.device, h1.temp + -> Custom Scan (AsyncAppend) + Output: h1."time", h1.device, h1.temp + -> Append + -> Custom Scan (DataNodeScan) on public.hyper h1_1 + Output: h1_1."time", h1_1.device, h1_1.temp + Data node: db_dist_query_1 + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_2 + Output: h1_2."time", h1_2.device, h1_2.temp + Data node: db_dist_query_2 + Chunks: _dist_hyper_1_2_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, 
"time") ASC NULLS LAST, device ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.hyper h1_3 + Output: h1_3."time", h1_3.device, h1_3.temp + Data node: db_dist_query_3 + Chunks: _dist_hyper_1_3_chunk + Remote SQL: SELECT "time", device, temp FROM public.hyper WHERE _timescaledb_functions.chunks_in(public.hyper.*, ARRAY[1]) AND (("time" >= '2019-01-01 00:00:00-08'::timestamp with time zone)) AND (("time" <= '2019-01-01 15:00:00-08'::timestamp with time zone)) ORDER BY public.time_bucket('00:01:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST +(33 rows) + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: reference +%%% PREFIX: +%%% WHERE_CLAUSE: :REPARTITIONED_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: +%%% WHERE_CLAUSE: :REPARTITIONED_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: reference +%%% PREFIX: +%%% WHERE_CLAUSE: :CLEAN_PARTITIONING_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: +%%% WHERE_CLAUSE: :CLEAN_PARTITIONING_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper +%%% PREFIX: +%%% WHERE_CLAUSE: :CLEAN_PARTITIONING_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: hyper1d +%%% PREFIX: +%%% WHERE_CLAUSE: :REPARTITIONED_TIME_RANGE +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%% LIMIT: +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +:DIFF_CMD_UNOPT +:DIFF_CMD_OPT +:DIFF_CMD_REPART +:DIFF_CMD_1DIM +RESET ROLE; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/dist_ref_table_join-16.out b/tsl/test/expected/dist_ref_table_join-16.out new file mode 100644 index 00000000000..8f0e1ee16ee --- /dev/null +++ b/tsl/test/expected/dist_ref_table_join-16.out @@ -0,0 +1,2230 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set ECHO all +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +-- Add data nodes +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +--------------------------+--------------------------+--------------+------------------+------------------- + db_dist_ref_table_join_1 | db_dist_ref_table_join_1 | t | t | t + db_dist_ref_table_join_2 | db_dist_ref_table_join_2 | t | t | t + db_dist_ref_table_join_3 | db_dist_ref_table_join_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +\des + List of foreign servers + Name | Owner | Foreign-data wrapper +--------------------------+--------------------+---------------------- + db_dist_ref_table_join_1 | cluster_super_user | timescaledb_fdw + db_dist_ref_table_join_2 | cluster_super_user | timescaledb_fdw + db_dist_ref_table_join_3 | cluster_super_user | timescaledb_fdw +(3 rows) + +drop table if exists metric; +NOTICE: table "metric" does not exist, skipping +CREATE table metric(ts timestamptz, id int, value float); +SELECT create_distributed_hypertable('metric', 'ts', 'id'); +NOTICE: adding not-null constraint to column "ts" + create_distributed_hypertable +------------------------------- + (1,public,metric,t) +(1 row) + +INSERT into metric values ('2022-02-02 02:02:02+03', 1, 50); +INSERT into metric values ('2020-01-01 01:01:01+03', 1, 60); +INSERT into metric values ('2000-03-03 03:03:03+03', 1, 70); +INSERT into metric values ('2000-04-04 04:04:03+03', 2, 80); +-- Reference table with generic replication +CREATE table metric_name(id int primary key, name text); +INSERT into metric_name values (1, 'cpu1'); +INSERT into metric_name values (2, 'cpu2'); +CALL distributed_exec($$CREATE table metric_name(id int primary key, name text);$$); +CALL distributed_exec($$INSERT into metric_name values (1, 'cpu1');$$); +CALL distributed_exec($$INSERT into metric_name values (2, 'cpu2');$$); +-- The reference table as DHT +CREATE TABLE metric_name_dht(id BIGSERIAL, name text); +SELECT create_distributed_hypertable('metric_name_dht', 'id', chunk_time_interval => 9223372036854775807, replication_factor => 3); + create_distributed_hypertable +------------------------------- + (2,public,metric_name_dht,t) +(1 row) + +INSERT into metric_name_dht (id, name) values (1, 'cpu1'); +INSERT into metric_name_dht (id, name) values (2, 'cpu2'); +-- A local version of the reference table +CREATE table metric_name_local(id int primary key, name text); +INSERT into metric_name_local values (1, 'cpu1'); +INSERT into metric_name_local values (2, 'cpu2'); +CREATE table reference_table2(id int primary key, name text); +SELECT create_distributed_hypertable('reference_table2', 'id', chunk_time_interval => 2147483647, replication_factor => 3); + create_distributed_hypertable +------------------------------- + (3,public,reference_table2,t) +(1 row) + +CREATE table local_table(id int primary key, name text); +SET client_min_messages TO WARNING; +-- Create a table in a different schema +CREATE SCHEMA test1; +GRANT CREATE ON SCHEMA test1 TO :ROLE_DEFAULT_PERM_USER; +GRANT USAGE ON SCHEMA test1 TO :ROLE_DEFAULT_PERM_USER; +CREATE table test1.table_in_schema(id int primary key, 
name text); +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +------------ + +(1 row) + +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (ADD reference_tables 'metric_name, reference_table2'); +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables 'metric_name, metric_name_dht'); +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +--------------------------------------------------- + {"reference_tables=metric_name, metric_name_dht"} +(1 row) + +\set ON_ERROR_STOP 0 +-- Try to declare a non existing table as reference table +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables 'metric_name, reference_table2, non_existing_table'); +ERROR: table "non_existing_table" does not exist +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +--------------------------------------------------- + {"reference_tables=metric_name, metric_name_dht"} +(1 row) + +-- Try to declare a hypertable as reference table +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables 'metric_name, reference_table2, metric'); +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +------------------------------------------------------------ + {"reference_tables=metric_name, reference_table2, metric"} +(1 row) + +-- Try to add an empty field +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables 'metric_name, , metric'); +ERROR: parameter "reference_tables" must be a comma-separated list of reference table names +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +------------------------------------------------------------ + {"reference_tables=metric_name, reference_table2, metric"} +(1 row) + +-- Try to declare a view as reference table +CREATE VIEW metric_name_view AS SELECT * FROM metric_name; +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables 'metric_name, metric_name_view'); +ERROR: relation "metric_name_view" is not an ordinary table. 
Only ordinary tables can be used as reference tables +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +------------------------------------------------------------ + {"reference_tables=metric_name, reference_table2, metric"} +(1 row) + +-- Try to use a table in a schema +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables 'test1.table_in_schema'); +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +------------------------------------------ + {reference_tables=test1.table_in_schema} +(1 row) + +-- Try to use a non-existing table in a schema +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables 'test1.table_in_schema_non_existing'); +ERROR: table "test1.table_in_schema_non_existing" does not exist +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +------------------------------------------ + {reference_tables=test1.table_in_schema} +(1 row) + +\set ON_ERROR_STOP 1 +-- Set empty options +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (SET reference_tables ''); +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +--------------------- + {reference_tables=} +(1 row) + +-- Remove options +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (DROP reference_tables); +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +------------ + +(1 row) + +-- Set options again +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (ADD reference_tables 'metric_name, metric_name_dht, reference_table2'); +SELECT fdwoptions FROM pg_foreign_data_wrapper WHERE fdwname = 'timescaledb_fdw'; + fdwoptions +--------------------------------------------------------------------- + {"reference_tables=metric_name, metric_name_dht, reference_table2"} +(1 row) + +SET client_min_messages TO DEBUG1; +\set PREFIX 'EXPLAIN (analyze, verbose, costs off, timing off, summary off)' +-- Analyze tables +ANALYZE metric; +LOG: statement: ANALYZE metric; +ANALYZE metric_name; +LOG: statement: ANALYZE metric_name; +ANALYZE metric_name_dht; +LOG: statement: ANALYZE metric_name_dht; +-- Our cost model for these kinds of plans is not so good yet, so make some +-- tweaks to always get the join pushdown. 
+set timescaledb.enable_parameterized_data_node_scan to false; +LOG: statement: set timescaledb.enable_parameterized_data_node_scan to false; +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (ADD fdw_tuple_cost '0.08'); +LOG: statement: ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (ADD fdw_tuple_cost '0.08'); +ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (ADD fdw_startup_cost '100.0'); +LOG: statement: ALTER FOREIGN DATA WRAPPER timescaledb_fdw OPTIONS (ADD fdw_startup_cost '100.0'); +------- +-- Tests based on results +------- +-- Simple join +SELECT * FROM metric LEFT JOIN metric_name USING (id); +LOG: statement: SELECT * FROM metric LEFT JOIN metric_name USING (id); +DEBUG: try to push down a join on a reference table + id | ts | value | name +----+------------------------------+-------+------ + 1 | Tue Feb 01 15:02:02 2022 PST | 50 | cpu1 + 1 | Tue Dec 31 14:01:01 2019 PST | 60 | cpu1 + 1 | Thu Mar 02 16:03:03 2000 PST | 70 | cpu1 + 2 | Mon Apr 03 18:04:03 2000 PDT | 80 | cpu2 +(4 rows) + +-- Filter +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + id | ts | value | name +----+------------------------------+-------+------ + 1 | Tue Feb 01 15:02:02 2022 PST | 50 | cpu1 +(1 row) + +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; +LOG: statement: SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; +DEBUG: try to push down a join on a reference table + id | ts | value | name +----+------------------------------+-------+------ + 1 | Tue Feb 01 15:02:02 2022 PST | 50 | cpu1 + 1 | Tue Dec 31 14:01:01 2019 PST | 60 | cpu1 + 1 | Thu Mar 02 16:03:03 2000 PST | 70 | cpu1 +(3 rows) + +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu1' AND name LIKE 'cpu2'; +LOG: statement: SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu1' AND name LIKE 'cpu2'; +DEBUG: try to push down a join on a reference table + id | ts | value | name +----+----+-------+------ +(0 rows) + +-- Ordering +SELECT * FROM metric LEFT JOIN metric_name USING (id) order by metric_name.name ASC; +LOG: statement: SELECT * FROM metric LEFT JOIN metric_name USING (id) order by metric_name.name ASC; +DEBUG: try to push down a join on a reference table + id | ts | value | name +----+------------------------------+-------+------ + 1 | Tue Feb 01 15:02:02 2022 PST | 50 | cpu1 + 1 | Tue Dec 31 14:01:01 2019 PST | 60 | cpu1 + 1 | Thu Mar 02 16:03:03 2000 PST | 70 | cpu1 + 2 | Mon Apr 03 18:04:03 2000 PDT | 80 | cpu2 +(4 rows) + +SELECT * FROM metric LEFT JOIN metric_name USING (id) order by metric_name.name DESC; +LOG: statement: SELECT * FROM metric LEFT JOIN metric_name USING (id) order by metric_name.name DESC; +DEBUG: try to push down a join on a reference table + id | ts | value | name +----+------------------------------+-------+------ + 2 | Mon Apr 03 18:04:03 2000 PDT | 80 | cpu2 + 1 | Tue Feb 01 15:02:02 2022 PST | 50 | cpu1 + 1 | Tue Dec 31 14:01:01 2019 PST | 60 | cpu1 + 1 | Thu Mar 02 16:03:03 2000 PST | 70 | cpu1 +(4 rows) + +-- Aggregations +SELECT SUM(metric.value) FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; +LOG: statement: SELECT SUM(metric.value) FROM metric LEFT JOIN metric_name 
USING (id) WHERE name = 'cpu1'; +DEBUG: try to push down a join on a reference table + sum +----- + 180 +(1 row) + +SELECT MAX(metric.value), MIN(metric.value) FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; +LOG: statement: SELECT MAX(metric.value), MIN(metric.value) FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; +DEBUG: try to push down a join on a reference table + max | min +-----+----- + 70 | 50 +(1 row) + +SELECT COUNT(*) FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; +LOG: statement: SELECT COUNT(*) FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; +DEBUG: try to push down a join on a reference table + count +------- + 3 +(1 row) + +-- Aggregations and Renaming +SELECT SUM(m1.value) FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name = 'cpu1'; +LOG: statement: SELECT SUM(m1.value) FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name = 'cpu1'; +DEBUG: try to push down a join on a reference table + sum +----- + 180 +(1 row) + +SELECT MAX(m1.value), MIN(m1.value) FROM metric AS m1 LEFT JOIN metric_name AS m2 USING (id) WHERE name = 'cpu1'; +LOG: statement: SELECT MAX(m1.value), MIN(m1.value) FROM metric AS m1 LEFT JOIN metric_name AS m2 USING (id) WHERE name = 'cpu1'; +DEBUG: try to push down a join on a reference table + max | min +-----+----- + 70 | 50 +(1 row) + +SELECT COUNT(*) FROM metric AS ma LEFT JOIN metric_name as m2 USING (id) WHERE name = 'cpu1'; +LOG: statement: SELECT COUNT(*) FROM metric AS ma LEFT JOIN metric_name as m2 USING (id) WHERE name = 'cpu1'; +DEBUG: try to push down a join on a reference table + count +------- + 3 +(1 row) + +-- Grouping +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name; +LOG: statement: SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name; +DEBUG: try to push down a join on a reference table + name | max | min +------+-----+----- + cpu1 | 70 | 50 + cpu2 | 80 | 80 +(2 rows) + +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name ORDER BY name DESC; +LOG: statement: SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name ORDER BY name DESC; +DEBUG: try to push down a join on a reference table + name | max | min +------+-----+----- + cpu2 | 80 | 80 + cpu1 | 70 | 50 +(2 rows) + +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name HAVING min(value) > 60 ORDER BY name DESC; +LOG: statement: SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name HAVING min(value) > 60 ORDER BY name DESC; +DEBUG: try to push down a join on a reference table + name | max | min +------+-----+----- + cpu2 | 80 | 80 +(1 row) + +------- +-- Tests based on query plans +------- +-- Tests without filter (vanilla PostgreSQL reftable) +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id); +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id); +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + 
Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name ON metric.id = metric_name.id; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name ON metric.id = metric_name.id; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.ts, metric.id, metric.value, metric_name.id, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.ts, metric_1.id, metric_1.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.ts, r8.id, r8.value, r2.id, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.ts, metric_2.id, metric_2.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r2.id, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +-- Tests without filter (DHT reftable) +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name_dht USING (id); +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name_dht USING (id); +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name_dht.name + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name_dht.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name_dht.name + Data node: 
db_dist_ref_table_join_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name_dht r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name_dht.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name_dht r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(17 rows) + +-- Tests with filter pushdown +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE value > 10; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE value > 10; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) AND ((r8.value > 10::double precision)) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) AND ((r9.value > 10::double precision)) +(15 rows) + +PREPARE prepared_join_pushdown_value (int) AS + SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE value > $1; +LOG: statement: PREPARE prepared_join_pushdown_value (int) AS + SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE value > $1; +:PREFIX +EXECUTE prepared_join_pushdown_value(10); +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +EXECUTE prepared_join_pushdown_value(10); +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, 
_dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) AND ((r8.value > 10::double precision)) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) AND ((r9.value > 10::double precision)) +(15 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE ts > '2022-02-02 02:02:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE ts > '2022-02-02 02:02:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.id, r5.ts, r5.value, r2.name FROM (public.metric r5 LEFT JOIN public.metric_name r2 ON (((r5.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) AND ((r5.ts > '2022-02-01 15:02:02-08'::timestamp with time zone)) +(6 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.id, r5.ts, r5.value, r2.name FROM (public.metric r5 LEFT JOIN public.metric_name r2 ON (((r5.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)) +(6 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.id, r5.ts, r5.value, r2.name FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) +(6 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu2'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu2'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=1 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=1 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, 
r8.ts, r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name = 'cpu2'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name = 'cpu2'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name_dht USING (id) WHERE name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name_dht USING (id) WHERE name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name_dht.name + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name_dht.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name_dht.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name_dht r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name_dht.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name_dht r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(17 rows) + +-- Tests with an expression that evaluates to false +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu1' AND name LIKE 'cpu2'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu1' AND name LIKE 'cpu2'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=0 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=0 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, 
r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu1'::text)) AND ((r2.name ~~ 'cpu2'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu1'::text)) AND ((r2.name ~~ 'cpu2'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +-- Tests with aliases +:PREFIX +SELECT * FROM metric m1 LEFT JOIN metric_name m2 USING (id); +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN metric_name m2 USING (id); +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.id, m1.ts, m1.value, m2.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.id, m1_1.ts, m1_1.value, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.id, m1_2.ts, m1_2.value, m2.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +:PREFIX +SELECT * FROM metric m1 LEFT JOIN metric_name m2 ON m1.id = m2.id; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN metric_name m2 ON m1.id = m2.id; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value, m2.id, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.ts, r8.id, r8.value, r2.id, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value, m2.id, m2.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r2.id, r2.name FROM (public.metric r9 LEFT JOIN 
public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +:PREFIX +SELECT * FROM metric m1 LEFT JOIN metric_name m2 ON m1.id = m2.id WHERE m1.value > 10; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN metric_name m2 ON m1.id = m2.id WHERE m1.value > 10; +DEBUG: try to push down a join on a reference table + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value, m2.id, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.ts, r8.id, r8.value, r2.id, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) AND ((r8.value > 10::double precision)) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value, m2.id, m2.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r2.id, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) AND ((r9.value > 10::double precision)) +(15 rows) + +:PREFIX +SELECT * FROM metric m1 LEFT JOIN metric_name m2 ON m1.id = m2.id WHERE m1.value > 10 AND m2.name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN metric_name m2 ON m1.id = m2.id WHERE m1.value > 10 AND m2.name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value, m2.id, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.ts, r8.id, r8.value, r2.id, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r8.value > 10::double precision)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value, m2.id, m2.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r2.id, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r9.value > 10::double precision)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) 
+(15 rows) + +-- Tests with projections +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.value, r2.name FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) +(8 rows) + +:PREFIX +SELECT m1.ts, m1.value FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT m1.ts, m1.value FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1.ts, m1.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.ts, r5.value FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) +(6 rows) + +:PREFIX +SELECT m1.id, m1.id FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT m1.id, m1.id FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + Output: m1.id, m1.id + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.id FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) +(8 rows) + +:PREFIX +SELECT m1.id, m2.id FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT m1.id, m2.id FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1.id, m2.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.id, r2.id FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) +(6 rows) + +:PREFIX +SELECT m1.*, m2.* FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT m1.*, m2.* FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.ts, r5.id, r5.value, r2.id, r2.name FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 
15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) +(6 rows) + +:PREFIX +SELECT * FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1.id, m1.ts, m1.value, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.id, r5.ts, r5.value, r2.name FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) +(6 rows) + +-- Ordering +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name ASC; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name ASC; +DEBUG: try to push down a join on a reference table + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name DESC; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name DESC; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name DESC + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name ASC NULLS first; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name ASC NULLS first; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 
loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name NULLS FIRST + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name ASC NULLS last; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name ASC NULLS last; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name DESC NULLS first; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name DESC NULLS first; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name DESC + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> 
Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name DESC NULLS last; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name DESC NULLS last; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name DESC NULLS LAST + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name, value DESC; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY name, value DESC; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name, metric.value DESC + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 
loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value, name DESC; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value, name DESC; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric.value, metric_name.name DESC + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value ASC, name DESC; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value ASC, name DESC; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric.value, metric_name.name DESC + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, 
_dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value ASC NULLS last, name DESC NULLS first; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value ASC NULLS last, name DESC NULLS first; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric.value, metric_name.name DESC + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +-- Ordering with explicit table qualification +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value, name, metric_name.id; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value, name, metric_name.id; +DEBUG: try to push down a join on a reference table + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value, metric_name.id + Sort Key: metric.value, metric_name.name, metric_name.id + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value, metric_name.id + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name, metric_name.id + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name, metric_name.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, 
_dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name, r2.id FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name, metric_name.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name, r2.id FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +:PREFIX +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value, name, metric_name.id, metric.id; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric LEFT JOIN metric_name USING (id) ORDER BY value, name, metric_name.id, metric.id; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value, metric_name.id, metric.id + Sort Key: metric.value, metric_name.name, metric_name.id, metric.id + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value, metric_name.id, metric.id + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric.id, metric_name.name, metric_name.id + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_1.id, metric_name.name, metric_name.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r8.id, r2.name, r2.id FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_2.id, metric_name.name, metric_name.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r9.id, r2.name, r2.id FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +-- Ordering with explicit table qualification and aliases +:PREFIX +SELECT name, value FROM metric m1 LEFT JOIN metric_name m2 USING (id) ORDER BY value, name, m1.id, m2.id; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value FROM metric m1 LEFT JOIN metric_name m2 USING (id) ORDER BY value, name, m1.id, m2.id; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort (actual rows=4 loops=1) + Output: m2.name, m1.value, m1.id, m2.id + Sort Key: m1.value, m2.name, m1.id, m2.id + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: m2.name, m1.value, m1.id, m2.id + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.value, m1.id, m2.name, m2.id + -> Append (actual rows=4 
loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.value, m1_1.id, m2.name, m2.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r8.id, r2.name, r2.id FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.value, m1_2.id, m2.name, m2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r9.id, r2.name, r2.id FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(21 rows) + +-- Grouping +:PREFIX +SELECT name FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name; +DEBUG: try to push down a join on a reference table + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Group (actual rows=2 loops=1) + Output: metric_name.name + Group Key: metric_name.name + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric_name.name + -> Merge Append (actual rows=4 loops=1) + Sort Key: metric_name.name + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) ORDER BY r2.name ASC NULLS LAST + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) ORDER BY r2.name ASC NULLS LAST +(19 rows) + +:PREFIX +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' GROUP BY name; +DEBUG: try to push down a join on a reference table + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=2 loops=1) + Output: metric_name.name, max(metric.value), min(metric.value) + Group Key: metric_name.name + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Merge Append (actual 
rows=4 loops=1) + Sort Key: metric_name.name + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) ORDER BY r2.name ASC NULLS LAST + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) ORDER BY r2.name ASC NULLS LAST +(21 rows) + +:PREFIX +SELECT name, max(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03' GROUP BY name; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, max(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' AND ts BETWEEN '2022-02-02 02:02:02+03' AND '2022-02-02 02:12:02+03' GROUP BY name; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=1 loops=1) + Output: metric_name.name, max(metric.value) + Group Key: metric_name.name + -> Result (actual rows=1 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk + Remote SQL: SELECT r5.value, r2.name FROM (public.metric r5 INNER JOIN public.metric_name r2 ON (((r5.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r5.ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((r5.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r5, ARRAY[1]) ORDER BY r2.name ASC NULLS LAST +(11 rows) + +-- Grouping and sorting +:PREFIX +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' and ts BETWEEN '2000-02-02 02:02:02+03' and '2022-02-02 02:12:02+03' GROUP BY name ORDER BY name DESC; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' and ts BETWEEN '2000-02-02 02:02:02+03' and '2022-02-02 02:12:02+03' GROUP BY name ORDER BY name DESC; +DEBUG: try to push down a join on a reference table + QUERY PLAN 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=2 loops=1) + Output: metric_name.name, max(metric.value), min(metric.value) + Group Key: metric_name.name + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Merge Append (actual rows=4 loops=1) + Sort Key: metric_name.name DESC + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r8.ts >= '2000-02-01 15:02:02-08'::timestamp with time zone)) AND ((r8.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) ORDER BY r2.name DESC NULLS FIRST + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r9.ts >= '2000-02-01 15:02:02-08'::timestamp with time zone)) AND ((r9.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) ORDER BY r2.name DESC NULLS FIRST +(21 rows) + +-- Having +:PREFIX +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' and ts BETWEEN '2000-02-02 02:02:02+03' and '2022-02-02 02:12:02+03' GROUP BY name having min(value) > 0 ORDER BY name DESC; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, max(value), min(value) FROM metric LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' and ts BETWEEN '2000-02-02 02:02:02+03' and '2022-02-02 02:12:02+03' GROUP BY name having min(value) > 0 ORDER BY name DESC; +DEBUG: try to push down a join on a reference table + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=2 loops=1) + Output: metric_name.name, max(metric.value), min(metric.value) + Group Key: metric_name.name + Filter: (min(metric.value) > '0'::double precision) + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Merge Append (actual rows=4 loops=1) + Sort Key: metric_name.name DESC + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, 
metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r8.ts >= '2000-02-01 15:02:02-08'::timestamp with time zone)) AND ((r8.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) ORDER BY r2.name DESC NULLS FIRST + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)) AND ((r9.ts >= '2000-02-01 15:02:02-08'::timestamp with time zone)) AND ((r9.ts <= '2022-02-01 15:12:02-08'::timestamp with time zone)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) ORDER BY r2.name DESC NULLS FIRST +(22 rows) + +-- Rank +:PREFIX +SELECT name, value, RANK () OVER (ORDER by value) from metric join metric_name_local USING (id); +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, value, RANK () OVER (ORDER by value) from metric join metric_name_local USING (id); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=4 loops=1) + Output: metric_name_local.name, metric.value, rank() OVER (?) + -> Nested Loop (actual rows=4 loops=1) + Output: metric.value, metric_name_local.name + Inner Unique: true + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric.id + -> Merge Append (actual rows=4 loops=1) + Sort Key: metric_1.value + -> Custom Scan (DataNodeScan) on public.metric metric_1 (actual rows=3 loops=1) + Output: metric_1.value, metric_1.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) ORDER BY value ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.metric metric_2 (actual rows=1 loops=1) + Output: metric_2.value, metric_2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) ORDER BY value ASC NULLS LAST + -> Index Scan using metric_name_local_pkey on public.metric_name_local (actual rows=1 loops=4) + Output: metric_name_local.id, metric_name_local.name + Index Cond: (metric_name_local.id = metric.id) +(24 rows) + +-- Check returned types +SELECT pg_typeof("name"), pg_typeof("id"), pg_typeof("value"), name, id, value FROM metric +LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' LIMIT 1; +LOG: statement: SELECT pg_typeof("name"), pg_typeof("id"), pg_typeof("value"), name, id, value FROM metric +LEFT JOIN metric_name USING (id) WHERE name LIKE 'cpu%' LIMIT 1; +DEBUG: try to push down a join on a reference table + pg_typeof | pg_typeof | pg_typeof | name | id | value +-----------+-----------+------------------+------+----+------- + text | integer | double precision | cpu1 | 1 | 50 +(1 row) + +-- Left join and 
reference table on the left hypertable on the right (no pushdown) +:PREFIX +SELECT * FROM metric_name LEFT JOIN metric USING (id) WHERE name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric_name LEFT JOIN metric USING (id) WHERE name LIKE 'cpu%'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=4 loops=1) + Output: metric_name.id, metric_name.name, metric.ts, metric.value + Join Filter: (metric_name.id = metric.id) + Rows Removed by Join Filter: 4 + -> Seq Scan on public.metric_name (actual rows=2 loops=1) + Output: metric_name.id, metric_name.name + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Materialize (actual rows=4 loops=2) + Output: metric.ts, metric.value, metric.id + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.ts, metric.value, metric.id + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric metric_1 (actual rows=3 loops=1) + Output: metric_1.ts, metric_1.value, metric_1.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric metric_2 (actual rows=1 loops=1) + Output: metric_2.ts, metric_2.value, metric_2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) +(24 rows) + +-- Right join reference table on the left, hypertable on the right (can be converted into a left join by PostgreSQL, pushdown) +:PREFIX +SELECT * FROM metric_name RIGHT JOIN metric USING (id) WHERE name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric_name RIGHT JOIN metric USING (id) WHERE name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=4 loops=1) + Output: metric.id, metric_name.name, metric.ts, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric_name.name, metric.id, metric.ts, metric.value + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_name.name, metric_1.id, metric_1.ts, metric_1.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r1.name, r8.id, r8.ts, r8.value FROM (public.metric r8 INNER JOIN public.metric_name r1 ON (((r1.id = r8.id)) AND ((r1.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_name.name, metric_2.id, metric_2.ts, metric_2.value + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r1.name, r9.id, r9.ts, r9.value FROM (public.metric r9 INNER JOIN public.metric_name r1 ON 
(((r1.id = r9.id)) AND ((r1.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(17 rows) + +-- Right join hypertable on the left, reference table on the right (no pushdown) +:PREFIX +SELECT * FROM metric RIGHT JOIN metric_name USING (id) WHERE name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric RIGHT JOIN metric_name USING (id) WHERE name LIKE 'cpu%'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=4 loops=1) + Output: metric_name.id, metric.ts, metric.value, metric_name.name + Join Filter: (metric.id = metric_name.id) + Rows Removed by Join Filter: 4 + -> Seq Scan on public.metric_name (actual rows=2 loops=1) + Output: metric_name.id, metric_name.name + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Materialize (actual rows=4 loops=2) + Output: metric.ts, metric.value, metric.id + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.ts, metric.value, metric.id + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric metric_1 (actual rows=3 loops=1) + Output: metric_1.ts, metric_1.value, metric_1.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric metric_2 (actual rows=1 loops=1) + Output: metric_2.ts, metric_2.value, metric_2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) +(24 rows) + +-- Inner join and reference table left, hypertable on the right (pushdown) +:PREFIX +SELECT * FROM metric_name INNER JOIN metric USING (id) WHERE name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric_name INNER JOIN metric USING (id) WHERE name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric_name.id, metric_name.name, metric.ts, metric.value + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_name.id, metric_name.name, metric_1.ts, metric_1.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r1.id, r1.name, r8.ts, r8.value FROM (public.metric r8 INNER JOIN public.metric_name r1 ON (((r1.id = r8.id)) AND ((r1.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_name.id, metric_name.name, metric_2.ts, metric_2.value + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r1.id, r1.name, r9.ts, r9.value FROM (public.metric r9 INNER JOIN public.metric_name r1 ON (((r1.id = 
r9.id)) AND ((r1.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +-- Implicit join on two tables, hypertable left, reference table right (pushdown) +:PREFIX +SELECT * FROM metric m1, metric_name m2 WHERE m1.id=m2.id AND name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1, metric_name m2 WHERE m1.id=m2.id AND name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value, m2.id, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r7.ts, r7.id, r7.value, r2.id, r2.name FROM (public.metric r7 INNER JOIN public.metric_name r2 ON (((r7.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r7, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value, m2.id, m2.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r8.ts, r8.id, r8.value, r2.id, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1]) +(15 rows) + +-- Implicit join on two tables, reference table left, hypertable right (pushdown) +:PREFIX +SELECT * FROM metric m2, metric_name m1 WHERE m1.id=m2.id AND name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m2, metric_name m1 WHERE m1.id=m2.id AND name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m2.ts, m2.id, m2.value, m1.id, m1.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m2_1.ts, m2_1.id, m2_1.value, m1.id, m1.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r7.ts, r7.id, r7.value, r2.id, r2.name FROM (public.metric r7 INNER JOIN public.metric_name r2 ON (((r7.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r7, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m2_2.ts, m2_2.id, m2_2.value, m1.id, m1.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r8.ts, r8.id, r8.value, r2.id, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1]) +(15 rows) + +-- Implicit join on three tables (no 
pushdown) +:PREFIX +SELECT * FROM metric m1, metric_name m2, metric_name m3 WHERE m1.id=m2.id AND m2.id = m3.id AND m3.name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1, metric_name m2, metric_name m3 WHERE m1.id=m2.id AND m2.id = m3.id AND m3.name LIKE 'cpu%'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name, m3.id, m3.name + Inner Unique: true + Join Filter: (m1.id = m3.id) + Rows Removed by Join Filter: 1 + -> Nested Loop (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + Inner Unique: true + Join Filter: (m1.id = m2.id) + Rows Removed by Join Filter: 1 + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric m1_1 (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric m1_2 (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) + -> Materialize (actual rows=1 loops=4) + Output: m2.id, m2.name + -> Seq Scan on public.metric_name m2 (actual rows=2 loops=1) + Output: m2.id, m2.name + -> Materialize (actual rows=1 loops=4) + Output: m3.id, m3.name + -> Seq Scan on public.metric_name m3 (actual rows=2 loops=1) + Output: m3.id, m3.name + Filter: (m3.name ~~ 'cpu%'::text) +(34 rows) + +-- Left join on a DHT and a subselect on a reference table (subselect can be removed, pushdown) +:PREFIX +SELECT * FROM metric LEFT JOIN (SELECT * FROM metric_name) AS sub ON metric.id=sub.id; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN (SELECT * FROM metric_name) AS sub ON metric.id=sub.id; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.ts, metric.id, metric.value, metric_name.id, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.ts, metric_1.id, metric_1.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r4.id, r4.name FROM (public.metric r9 LEFT JOIN public.metric_name r4 ON (((r9.id = r4.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.ts, metric_2.id, metric_2.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: 
_dist_hyper_1_4_chunk + Remote SQL: SELECT r10.ts, r10.id, r10.value, r4.id, r4.name FROM (public.metric r10 LEFT JOIN public.metric_name r4 ON (((r10.id = r4.id)))) WHERE _timescaledb_functions.chunks_in(r10, ARRAY[1]) +(15 rows) + +-- Left join on a DHT and a subselect with filter on a reference table (subselect can be removed, pushdown) +:PREFIX +SELECT * FROM metric LEFT JOIN (SELECT * FROM metric_name WHERE name LIKE 'cpu%') AS sub ON metric.id=sub.id; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN (SELECT * FROM metric_name WHERE name LIKE 'cpu%') AS sub ON metric.id=sub.id; +DEBUG: try to push down a join on a reference table + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.ts, metric.id, metric.value, metric_name.id, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.ts, metric_1.id, metric_1.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r4.id, r4.name FROM (public.metric r9 LEFT JOIN public.metric_name r4 ON (((r9.id = r4.id)) AND ((r4.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.ts, metric_2.id, metric_2.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r10.ts, r10.id, r10.value, r4.id, r4.name FROM (public.metric r10 LEFT JOIN public.metric_name r4 ON (((r10.id = r4.id)) AND ((r4.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r10, ARRAY[1]) +(15 rows) + +-- Left join on a subselect on a DHT and a reference table (subselect can be removed, pushdown) +:PREFIX +SELECT * FROM (SELECT * FROM metric) as sub LEFT JOIN metric_name ON sub.id=metric_name.id WHERE name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM (SELECT * FROM metric) as sub LEFT JOIN metric_name ON sub.id=metric_name.id WHERE name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.ts, metric.id, metric.value, metric_name.id, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.ts, metric_1.id, metric_1.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r2.id, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r2.id = r9.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) 
(actual rows=1 loops=1) + Output: metric_2.ts, metric_2.id, metric_2.value, metric_name.id, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r10.ts, r10.id, r10.value, r2.id, r2.name FROM (public.metric r10 INNER JOIN public.metric_name r2 ON (((r2.id = r10.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r10, ARRAY[1]) +(15 rows) + +-- Left join and hypertable on left and right (no pushdown) +:PREFIX +SELECT * FROM metric m1 LEFT JOIN metric m2 USING (id) WHERE m1.id = 2; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN metric m2 USING (id) WHERE m1.id = 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=1 loops=1) + Output: m1.id, m1.ts, m1.value, m2.ts, m2.value + -> Custom Scan (DataNodeScan) on public.metric m1 (actual rows=1 loops=1) + Output: m1.id, m1.ts, m1.value + Data node: db_dist_ref_table_join_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) AND ((id = 2)) + -> Custom Scan (AsyncAppend) (actual rows=1 loops=1) + Output: m2.ts, m2.value, m2.id + -> Append (actual rows=1 loops=1) + -> Custom Scan (DataNodeScan) on public.metric m2_1 (actual rows=0 loops=1) + Output: m2_1.ts, m2_1.value, m2_1.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) AND ((id = 2)) + -> Custom Scan (DataNodeScan) on public.metric m2_2 (actual rows=1 loops=1) + Output: m2_2.ts, m2_2.value, m2_2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) AND ((id = 2)) +(23 rows) + +-- Left join and reference table on left and right +:PREFIX +SELECT * FROM metric_name m1 LEFT JOIN metric_name m2 USING (id) WHERE m1.name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric_name m1 LEFT JOIN metric_name m2 USING (id) WHERE m1.name LIKE 'cpu%'; + QUERY PLAN +----------------------------------------------------------------------- + Nested Loop Left Join (actual rows=2 loops=1) + Output: m1.id, m1.name, m2.name + Inner Unique: true + Join Filter: (m1.id = m2.id) + Rows Removed by Join Filter: 1 + -> Seq Scan on public.metric_name m1 (actual rows=2 loops=1) + Output: m1.id, m1.name + Filter: (m1.name ~~ 'cpu%'::text) + -> Materialize (actual rows=2 loops=2) + Output: m2.name, m2.id + -> Seq Scan on public.metric_name m2 (actual rows=2 loops=1) + Output: m2.name, m2.id +(12 rows) + +-- Only aggregation no values needs to be transferred +:PREFIX +SELECT count(*) FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE m2.name LIKE 'cpu%'; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT count(*) FROM metric m1 LEFT JOIN metric_name m2 USING (id) WHERE m2.name LIKE 'cpu%'; +DEBUG: try to push down a join on a reference table + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + Output: count(*) + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT NULL FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT NULL FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(14 rows) + +-- Lateral joins that can be converted into regular joins +:PREFIX +SELECT * FROM metric m1 LEFT JOIN LATERAL (SELECT * FROM metric_name m2 WHERE m1.id = m2.id) t ON TRUE; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN LATERAL (SELECT * FROM metric_name m2 WHERE m1.id = m2.id) t ON TRUE; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value, m2.id, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r4.id, r4.name FROM (public.metric r9 LEFT JOIN public.metric_name r4 ON (((r9.id = r4.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value, m2.id, m2.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r10.ts, r10.id, r10.value, r4.id, r4.name FROM (public.metric r10 LEFT JOIN public.metric_name r4 ON (((r10.id = r4.id)))) WHERE _timescaledb_functions.chunks_in(r10, ARRAY[1]) +(15 rows) + +:PREFIX +SELECT * FROM metric m1 LEFT JOIN LATERAL (SELECT * FROM metric_name m2 WHERE m1.id > m2.id) t ON TRUE; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN LATERAL (SELECT * FROM metric_name m2 WHERE m1.id > m2.id) t ON TRUE; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value, 
m2.id, m2.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r9.ts, r9.id, r9.value, r4.id, r4.name FROM (public.metric r9 LEFT JOIN public.metric_name r4 ON (((r9.id > r4.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value, m2.id, m2.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r10.ts, r10.id, r10.value, r4.id, r4.name FROM (public.metric r10 LEFT JOIN public.metric_name r4 ON (((r10.id > r4.id)))) WHERE _timescaledb_functions.chunks_in(r10, ARRAY[1]) +(15 rows) + +-- Lateral join that can not be converted and pushed down +:PREFIX +SELECT * FROM metric m1 LEFT JOIN LATERAL (SELECT * FROM metric_name m2 WHERE m1.id > m2.id ORDER BY m2.name LIMIT 1) t ON TRUE; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN LATERAL (SELECT * FROM metric_name m2 WHERE m1.id > m2.id ORDER BY m2.name LIMIT 1) t ON TRUE; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value, m2.id, m2.name + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.ts, m1.id, m1.value + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric m1_1 (actual rows=3 loops=1) + Output: m1_1.ts, m1_1.id, m1_1.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric m1_2 (actual rows=1 loops=1) + Output: m1_2.ts, m1_2.id, m1_2.value + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) + -> Limit (actual rows=0 loops=4) + Output: m2.id, m2.name + -> Sort (actual rows=0 loops=4) + Output: m2.id, m2.name + Sort Key: m2.name + Sort Method: quicksort + -> Seq Scan on public.metric_name m2 (actual rows=0 loops=4) + Output: m2.id, m2.name + Filter: (m1.id > m2.id) + Rows Removed by Filter: 2 +(27 rows) + +-- Two left joins (no pushdown) +:PREFIX +SELECT * FROM metric m1 LEFT JOIN metric m2 USING (id) LEFT JOIN metric_name mn USING(id); +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric m1 LEFT JOIN metric m2 USING (id) LEFT JOIN metric_name mn USING(id); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------- + Hash Left Join (actual rows=10 loops=1) + Output: m1.id, m1.ts, m1.value, m2.ts, m2.value, mn.name + Inner Unique: true + Hash Cond: (m1.id = mn.id) + -> Nested Loop Left Join (actual rows=10 loops=1) + Output: m1.id, m1.ts, m1.value, m2.ts, m2.value + Join Filter: (m1.id = m2.id) + Rows Removed by Join Filter: 6 + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m1.id, m1.ts, m1.value + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric m1_1 
(actual rows=3 loops=1) + Output: m1_1.id, m1_1.ts, m1_1.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric m1_2 (actual rows=1 loops=1) + Output: m1_2.id, m1_2.ts, m1_2.value + Data node: db_dist_ref_table_join_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) + -> Materialize (actual rows=4 loops=4) + Output: m2.ts, m2.value, m2.id + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: m2.ts, m2.value, m2.id + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric m2_1 (actual rows=3 loops=1) + Output: m2_1.ts, m2_1.value, m2_1.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric m2_2 (actual rows=1 loops=1) + Output: m2_2.ts, m2_2.value, m2_2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT ts, id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) + -> Hash (actual rows=2 loops=1) + Output: mn.name, mn.id + Buckets: 1024 Batches: 1 + -> Seq Scan on public.metric_name mn (actual rows=2 loops=1) + Output: mn.name, mn.id +(45 rows) + +------- +-- Tests with shippable and non-shippable joins / EquivalenceClass +-- See 'dist_param.sql' for an explanation of the used textin / int4out +-- functions. 
+------- +-- Shippable non-EquivalenceClass join +:PREFIX +SELECT name, max(value), count(*) +FROM metric JOIN metric_name ON texteq('cpu' || textin(int4out(metric.id)), name) +GROUP BY name +ORDER BY name; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, max(value), count(*) +FROM metric JOIN metric_name ON texteq('cpu' || textin(int4out(metric.id)), name) +GROUP BY name +ORDER BY name; +DEBUG: try to push down a join on a reference table + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate (actual rows=2 loops=1) + Output: metric_name.name, max(metric.value), count(*) + Group Key: metric_name.name + -> Result (actual rows=4 loops=1) + Output: metric_name.name, metric.value + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric_name.name + -> Merge Append (actual rows=4 loops=1) + Sort Key: metric_name.name + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON ((texteq(('cpu'::text || textin(int4out(r8.id))), r2.name)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) ORDER BY r2.name ASC NULLS LAST + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON ((texteq(('cpu'::text || textin(int4out(r9.id))), r2.name)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) ORDER BY r2.name ASC NULLS LAST +(21 rows) + +-- Non-shippable equality class join +:PREFIX +SELECT name, max(value), count(*) +FROM metric JOIN metric_name ON name = concat('cpu', metric.id) +GROUP BY name +ORDER BY name; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, max(value), count(*) +FROM metric JOIN metric_name ON name = concat('cpu', metric.id) +GROUP BY name +ORDER BY name; +DEBUG: try to push down a join on a reference table +DEBUG: join pushdown on reference table is not supported for the used query + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=2 loops=1) + Output: metric_name.name, max(metric.value), count(*) + Group Key: metric_name.name + -> Merge Join (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Merge Cond: ((concat('cpu', metric.id)) = metric_name.name) + -> Sort (actual rows=4 loops=1) + Output: metric.value, metric.id, (concat('cpu', metric.id)) + Sort Key: (concat('cpu', metric.id)) + Sort Method: quicksort + -> Result (actual rows=4 loops=1) + Output: metric.value, metric.id, concat('cpu', metric.id) + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric.id + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric metric_1 (actual rows=3 loops=1) + Output: metric_1.value, metric_1.id + 
Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric metric_2 (actual rows=1 loops=1) + Output: metric_2.value, metric_2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) + -> Sort (actual rows=4 loops=1) + Output: metric_name.name + Sort Key: metric_name.name + Sort Method: quicksort + -> Seq Scan on public.metric_name (actual rows=2 loops=1) + Output: metric_name.name +(33 rows) + +-- Non-shippable non-EquivalenceClass join +:PREFIX +SELECT name, max(value), count(*) +FROM metric JOIN metric_name ON texteq(concat('cpu', textin(int4out(metric.id))), name) +GROUP BY name +ORDER BY name; +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT name, max(value), count(*) +FROM metric JOIN metric_name ON texteq(concat('cpu', textin(int4out(metric.id))), name) +GROUP BY name +ORDER BY name; +DEBUG: try to push down a join on a reference table +DEBUG: join pushdown on reference table is not supported for the used query + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=2 loops=1) + Output: metric_name.name, max(metric.value), count(*) + Group Key: metric_name.name + -> Sort (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Sort Key: metric_name.name + Sort Method: quicksort + -> Nested Loop (actual rows=4 loops=1) + Output: metric_name.name, metric.value + Join Filter: texteq(concat('cpu', textin(int4out(metric.id))), metric_name.name) + Rows Removed by Join Filter: 4 + -> Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.value, metric.id + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) on public.metric metric_1 (actual rows=3 loops=1) + Output: metric_1.value, metric_1.id + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) on public.metric metric_2 (actual rows=1 loops=1) + Output: metric_2.value, metric_2.id + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT id, value FROM public.metric WHERE _timescaledb_functions.chunks_in(public.metric.*, ARRAY[1]) + -> Materialize (actual rows=2 loops=4) + Output: metric_name.name + -> Seq Scan on public.metric_name (actual rows=2 loops=1) + Output: metric_name.name +(30 rows) + +------- +-- MERGE is supported in PG >= 15. Currently, it is not supported in TimescaleDB +-- on distributed hypertables. Perform a MERGE here to check if the join pushdown +-- can handle the MERGE command properly. ON_ERROR_STOP is disabled for this test. +-- Older PostgreSQL versions report an error because MERGE is not supported. This +-- will be ignored due to the setting. +-- Commenting below test as error meesage is different on windows vs unix. +-- Issue #5725 is opened to track it. 
+------- +-- \set ON_ERROR_STOP 0 +-- MERGE INTO metric as target_0 +-- USING metric as input_0 +-- inner join (select id from metric_name as input_1) as subq_0 +-- ON (TRUE) +-- ON target_0.id = input_0.id +-- WHEN MATCHED +-- THEN DO NOTHING +-- WHEN NOT MATCHED +-- THEN DO NOTHING; +-- \set ON_ERROR_STOP 1 +------- +-- Tests without enable_per_data_node_queries (no pushdown supported) +------- +SET timescaledb.enable_per_data_node_queries = false; +LOG: statement: SET timescaledb.enable_per_data_node_queries = false; +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id); +LOG: statement: EXPLAIN (analyze, verbose, costs off, timing off, summary off) +SELECT * FROM metric LEFT JOIN metric_name USING (id); +DEBUG: join on reference table is not considered to be pushed down because 'enable_per_data_node_queries' GUC is disabled + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=4 loops=1) + Output: _dist_hyper_1_1_chunk.id, _dist_hyper_1_1_chunk.ts, _dist_hyper_1_1_chunk.value, metric_name.name + Inner Unique: true + Join Filter: (_dist_hyper_1_1_chunk.id = metric_name.id) + Rows Removed by Join Filter: 1 + -> Append (actual rows=4 loops=1) + -> Foreign Scan on _timescaledb_internal._dist_hyper_1_1_chunk (actual rows=1 loops=1) + Output: _dist_hyper_1_1_chunk.id, _dist_hyper_1_1_chunk.ts, _dist_hyper_1_1_chunk.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: Cursor + Remote SQL: SELECT ts, id, value FROM _timescaledb_internal._dist_hyper_1_1_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_1_2_chunk (actual rows=1 loops=1) + Output: _dist_hyper_1_2_chunk.id, _dist_hyper_1_2_chunk.ts, _dist_hyper_1_2_chunk.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: Cursor + Remote SQL: SELECT ts, id, value FROM _timescaledb_internal._dist_hyper_1_2_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_1_3_chunk (actual rows=1 loops=1) + Output: _dist_hyper_1_3_chunk.id, _dist_hyper_1_3_chunk.ts, _dist_hyper_1_3_chunk.value + Data node: db_dist_ref_table_join_1 + Fetcher Type: Cursor + Remote SQL: SELECT ts, id, value FROM _timescaledb_internal._dist_hyper_1_3_chunk + -> Foreign Scan on _timescaledb_internal._dist_hyper_1_4_chunk (actual rows=1 loops=1) + Output: _dist_hyper_1_4_chunk.id, _dist_hyper_1_4_chunk.ts, _dist_hyper_1_4_chunk.value + Data node: db_dist_ref_table_join_2 + Fetcher Type: Cursor + Remote SQL: SELECT ts, id, value FROM _timescaledb_internal._dist_hyper_1_4_chunk + -> Materialize (actual rows=1 loops=4) + Output: metric_name.name, metric_name.id + -> Seq Scan on public.metric_name (actual rows=2 loops=1) + Output: metric_name.name, metric_name.id +(30 rows) + +SET timescaledb.enable_per_data_node_queries = true; +LOG: statement: SET timescaledb.enable_per_data_node_queries = true; +------- +-- Tests with empty reftable +------- +RESET client_min_messages; +LOG: statement: RESET client_min_messages; +TRUNCATE metric_name; +CALL distributed_exec($$TRUNCATE metric_name;$$); +-- Left join +SELECT * FROM metric LEFT JOIN metric_name USING (id); + id | ts | value | name +----+------------------------------+-------+------ + 1 | Tue Feb 01 15:02:02 2022 PST | 50 | + 1 | Tue Dec 31 14:01:01 2019 PST | 60 | + 1 | Thu Mar 02 16:03:03 2000 PST | 70 | + 2 | Mon Apr 03 18:04:03 2000 PDT | 80 | +(4 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id); + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) (actual rows=4 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=4 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=3 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 LEFT JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=1 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 LEFT JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +-- Inner join +SELECT * FROM metric JOIN metric_name USING (id); + id | ts | value | name +----+----+-------+------ +(0 rows) + +:PREFIX +SELECT * FROM metric JOIN metric_name USING (id); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=0 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=0 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +-- Filter on the NULL column +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name IS NOT NULL; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=0 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=0 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = 
r2.id)) AND ((r2.name IS NOT NULL)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name IS NOT NULL)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +:PREFIX +SELECT * FROM metric LEFT JOIN metric_name USING (id) WHERE name = 'cpu1'; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (AsyncAppend) (actual rows=0 loops=1) + Output: metric.id, metric.ts, metric.value, metric_name.name + -> Append (actual rows=0 loops=1) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_1.id, metric_1.ts, metric_1.value, metric_name.name + Data node: db_dist_ref_table_join_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk + Remote SQL: SELECT r8.id, r8.ts, r8.value, r2.name FROM (public.metric r8 INNER JOIN public.metric_name r2 ON (((r8.id = r2.id)) AND ((r2.name = 'cpu1'::text)))) WHERE _timescaledb_functions.chunks_in(r8, ARRAY[1, 2, 3]) + -> Custom Scan (DataNodeScan) (actual rows=0 loops=1) + Output: metric_2.id, metric_2.ts, metric_2.value, metric_name.name + Data node: db_dist_ref_table_join_2 + Fetcher Type: COPY + Chunks: _dist_hyper_1_4_chunk + Remote SQL: SELECT r9.id, r9.ts, r9.value, r2.name FROM (public.metric r9 INNER JOIN public.metric_name r2 ON (((r9.id = r2.id)) AND ((r2.name = 'cpu1'::text)))) WHERE _timescaledb_functions.chunks_in(r9, ARRAY[1]) +(15 rows) + +------- +-- Drop reftable on DNs and check proper error reporting +------- +\set ON_ERROR_STOP 0 +CALL distributed_exec($$DROP table metric_name;$$); +SELECT * FROM metric LEFT JOIN metric_name USING (id); +ERROR: [db_dist_ref_table_join_1]: relation "public.metric_name" does not exist diff --git a/tsl/test/expected/dist_remote_error-16.out b/tsl/test/expected/dist_remote_error-16.out new file mode 100644 index 00000000000..c32fbd0e2e3 --- /dev/null +++ b/tsl/test/expected/dist_remote_error-16.out @@ -0,0 +1,433 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--\set DATA_NODE_1 data_node_1 +--\set DATA_NODE_2 data_node_2 +--\set DATA_NODE_3 data_node_3 +-- Set up the data nodes. 
+\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\c :TEST_DBNAME :ROLE_SUPERUSER +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +------------------------+------------------------+--------------+------------------+------------------- + db_dist_remote_error_1 | db_dist_remote_error_1 | t | t | t + db_dist_remote_error_2 | db_dist_remote_error_2 | t | t | t + db_dist_remote_error_3 | db_dist_remote_error_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +GRANT CREATE ON SCHEMA public TO :ROLE_1; +-- Import setup file to data nodes. +\unset ECHO +-- Disable SSL to get stable error output across versions. SSL adds some output +-- that changed in PG 14. +set timescaledb.debug_enable_ssl to off; +set client_min_messages to error; +SET timescaledb.hide_data_node_name_in_errors = 'on'; +-- A relatively big table on one data node +CREATE TABLE metrics_dist_remote_error(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +SELECT create_distributed_hypertable('metrics_dist_remote_error','time','device_id',3, + data_nodes => ARRAY[:'DATA_NODE_1']); + create_distributed_hypertable +---------------------------------------- + (1,public,metrics_dist_remote_error,t) +(1 row) + +ALTER TABLE metrics_dist_remote_error DROP COLUMN filler_1; +INSERT INTO metrics_dist_remote_error(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','6m') gtime(time), generate_series(1,5,1) gdevice(device_id); +ALTER TABLE metrics_dist_remote_error DROP COLUMN filler_2; +INSERT INTO metrics_dist_remote_error(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','6m') gtime(time), generate_series(1,5,1) gdevice(device_id); +ALTER TABLE metrics_dist_remote_error DROP COLUMN filler_3; +INSERT INTO metrics_dist_remote_error(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','6m') gtime(time), generate_series(1,5,1) gdevice(device_id); +ANALYZE metrics_dist_remote_error; +-- The error messages vary wildly between the Postgres versions, depending on +-- the particular behavior of libpq in this or that case. The purpose of this +-- test is not to solidify this accidental behavior, but to merely exercise the +-- error handling code to make sure it doesn't have fatal errors. Unfortunately, +-- there is no way to suppress error output from a psql script.
+set client_min_messages to ERROR; +\set ON_ERROR_STOP off +set timescaledb.remote_data_fetcher = 'copy'; +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 0 rows, 0 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 1 rows, 1 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 2 rows, 2 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 701 rows, 701 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 10000 rows, 10000 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(16384, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 16384 rows, 16384 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=22799 loops=1) + Output: 1 + -> Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=22799 loops=1) + Data node: db_dist_remote_error_1 + Fetcher Type: COPY + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk + Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_functions.chunks_in(public.metrics_dist_remote_error.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0)) +(7 rows) + +-- We don't test fatal errors here, because PG versions before 14 are unable to +-- report them properly to the access node, so we get different errors in these +-- versions. +-- Now test the same with the cursor fetcher. 
+set timescaledb.remote_data_fetcher = 'cursor'; +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 0 rows, 0 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 1 rows, 1 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 2 rows, 2 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 701 rows, 701 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 10000 rows, 10000 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=22799 loops=1) + Output: 1 + -> Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=22799 loops=1) + Data node: db_dist_remote_error_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk + Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_functions.chunks_in(public.metrics_dist_remote_error.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0)) +(7 rows) + +-- Now test the same with the prepared statement fetcher. 
+set timescaledb.remote_data_fetcher = 'prepared'; +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 0 rows, 0 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 1 rows, 1 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 2 rows, 2 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 701 rows, 701 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0; +ERROR: []: debug point: requested to error out after 10000 rows, 10000 rows seen +explain (analyze, verbose, costs off, timing off, summary off) +select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=22799 loops=1) + Output: 1 + -> Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=22799 loops=1) + Data node: db_dist_remote_error_1 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk + Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_functions.chunks_in(public.metrics_dist_remote_error.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0)) +(7 rows) + +reset timescaledb.remote_data_fetcher; +-- Table with broken send for a data type. +create table metrics_dist_bs(like metrics_dist_remote_error); +alter table metrics_dist_bs alter column v0 type bs; +select table_name from create_distributed_hypertable('metrics_dist_bs', + 'time', 'device_id'); + table_name +----------------- + metrics_dist_bs +(1 row) + +set timescaledb.enable_connection_binary_data to off; +insert into metrics_dist_bs + select * from metrics_dist_remote_error; +set timescaledb.enable_connection_binary_data to on; +explain (analyze, verbose, costs off, timing off, summary off) +select * from metrics_dist_bs; +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +drop table metrics_dist_bs; +-- Table with broken receive for a data type. 
+create table metrics_dist_br(like metrics_dist_remote_error); +alter table metrics_dist_br alter column v0 type br; +select table_name from create_distributed_hypertable('metrics_dist_br', + 'time', 'device_id'); + table_name +----------------- + metrics_dist_br +(1 row) + +select hypertable_name, replication_factor from timescaledb_information.hypertables +where hypertable_name = 'metrics_dist_br'; + hypertable_name | replication_factor +-----------------+-------------------- + metrics_dist_br | 1 +(1 row) + +-- Test that INSERT and COPY fail on data nodes. +-- Note that we use the text format for the COPY input, so that the access node +-- doesn't call `recv` and fail by itself. It's going to use binary format for +-- transfer to data nodes regardless of the input format. +set timescaledb.dist_copy_transfer_format = 'binary'; +-- First, create the reference. +\copy (select * from metrics_dist_remote_error) to 'dist_remote_error.text' with (format text); +-- We have to test various interleavings of COPY and INSERT to check that +-- one can recover from connection failure states introduced by another. +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +insert into metrics_dist_br select * from metrics_dist_remote_error; +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +insert into metrics_dist_br select * from metrics_dist_remote_error; +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +-- Fail at different points +set timescaledb.debug_broken_sendrecv_error_after = 1; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1 rows, 1 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 2; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 2 rows, 2 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 1023; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1023 rows, 1023 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 1024; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1024 rows, 1024 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 1025; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1025 rows, 1025 rows seen +reset timescaledb.debug_broken_sendrecv_error_after; +-- Same with different replication factor +truncate metrics_dist_br; +select set_replication_factor('metrics_dist_br', 2); + set_replication_factor +------------------------ + +(1 row) + +select hypertable_name, replication_factor from timescaledb_information.hypertables +where hypertable_name = 'metrics_dist_br'; + hypertable_name | replication_factor +-----------------+-------------------- + metrics_dist_br | 2 +(1 row) + +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out 
after 7103 rows, 7103 rows seen +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +insert into metrics_dist_br select * from metrics_dist_remote_error; +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +insert into metrics_dist_br select * from metrics_dist_remote_error; +ERROR: []: debug point: requested to error out after 7103 rows, 7103 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 1; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1 rows, 1 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 2; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 2 rows, 2 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 1023; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1023 rows, 1023 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 1024; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1024 rows, 1024 rows seen +set timescaledb.debug_broken_sendrecv_error_after = 1025; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +ERROR: []: debug point: requested to error out after 1025 rows, 1025 rows seen +-- Should succeed with text format for data transfer. +set timescaledb.dist_copy_transfer_format = 'text'; +\copy metrics_dist_br from 'dist_remote_error.text' with (format text); +-- Final check. +set timescaledb.enable_connection_binary_data = false; +select count(*) from metrics_dist_br; + count +------- + 22800 +(1 row) + +set timescaledb.enable_connection_binary_data = true; +reset timescaledb.debug_broken_sendrecv_error_after; +drop table metrics_dist_br; +-- Table with sleepy receive for a data type, to improve coverage of the waiting +-- code on the access node. +create table metrics_dist_sr(like metrics_dist_remote_error); +alter table metrics_dist_sr alter column v0 type sr; +select table_name from create_distributed_hypertable('metrics_dist_sr', + 'time', 'device_id'); + table_name +----------------- + metrics_dist_sr +(1 row) + +-- We're using sleepy recv function, so need the binary transfer format for it +-- to be called on the data nodes. +set timescaledb.dist_copy_transfer_format = 'binary'; +-- Test INSERT and COPY with slow data node. +\copy metrics_dist_sr from 'dist_remote_error.text' with (format text); +insert into metrics_dist_sr select * from metrics_dist_remote_error; +select count(*) from metrics_dist_sr; + count +------- + 45600 +(1 row) + +drop table metrics_dist_sr; +-- Table with sleepy send for a data type, on one data node, to improve coverage +-- of waiting in data fetchers. +create table metrics_dist_ss(like metrics_dist_remote_error); +alter table metrics_dist_ss alter column v0 type ss; +select table_name from create_distributed_hypertable('metrics_dist_ss', + 'time', 'device_id'); + table_name +----------------- + metrics_dist_ss +(1 row) + +-- Populate the table, using text COPY to avoid the sleepy stuff. +set timescaledb.dist_copy_transfer_format = 'text'; +\copy metrics_dist_ss from 'dist_remote_error.text' with (format text); +-- We're using sleepy send function, so need the binary transfer format for it +-- to be called on the data nodes. 
+set timescaledb.enable_connection_binary_data = true; +set timescaledb.remote_data_fetcher = 'prepared'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from metrics_dist_ss; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) (actual rows=22800 loops=1) + Output: metrics_dist_ss."time", metrics_dist_ss.device_id, metrics_dist_ss.v0, metrics_dist_ss.v1, metrics_dist_ss.v2, metrics_dist_ss.v3 + -> Append (actual rows=22800 loops=1) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_1 (actual rows=4560 loops=1) + Output: metrics_dist_ss_1."time", metrics_dist_ss_1.device_id, metrics_dist_ss_1.v0, metrics_dist_ss_1.v1, metrics_dist_ss_1.v2, metrics_dist_ss_1.v3 + Data node: db_dist_remote_error_1 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_5_123_chunk, _dist_hyper_5_126_chunk, _dist_hyper_5_129_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[57, 58, 59]) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_2 (actual rows=13680 loops=1) + Output: metrics_dist_ss_2."time", metrics_dist_ss_2.device_id, metrics_dist_ss_2.v0, metrics_dist_ss_2.v1, metrics_dist_ss_2.v2, metrics_dist_ss_2.v3 + Data node: db_dist_remote_error_2 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_5_124_chunk, _dist_hyper_5_127_chunk, _dist_hyper_5_130_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[49, 50, 51]) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_3 (actual rows=4560 loops=1) + Output: metrics_dist_ss_3."time", metrics_dist_ss_3.device_id, metrics_dist_ss_3.v0, metrics_dist_ss_3.v1, metrics_dist_ss_3.v2, metrics_dist_ss_3.v3 + Data node: db_dist_remote_error_3 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_5_125_chunk, _dist_hyper_5_128_chunk, _dist_hyper_5_131_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[38, 39, 40]) +(21 rows) + +set timescaledb.remote_data_fetcher = 'copy'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from metrics_dist_ss; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) (actual rows=22800 loops=1) + Output: metrics_dist_ss."time", metrics_dist_ss.device_id, metrics_dist_ss.v0, metrics_dist_ss.v1, metrics_dist_ss.v2, metrics_dist_ss.v3 + -> Append (actual rows=22800 loops=1) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_1 (actual rows=4560 loops=1) + Output: metrics_dist_ss_1."time", metrics_dist_ss_1.device_id, metrics_dist_ss_1.v0, metrics_dist_ss_1.v1, metrics_dist_ss_1.v2, metrics_dist_ss_1.v3 + Data node: db_dist_remote_error_1 + Fetcher Type: COPY + Chunks: _dist_hyper_5_123_chunk, _dist_hyper_5_126_chunk, _dist_hyper_5_129_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[57, 58, 59]) + -> Custom Scan (DataNodeScan) on 
public.metrics_dist_ss metrics_dist_ss_2 (actual rows=13680 loops=1) + Output: metrics_dist_ss_2."time", metrics_dist_ss_2.device_id, metrics_dist_ss_2.v0, metrics_dist_ss_2.v1, metrics_dist_ss_2.v2, metrics_dist_ss_2.v3 + Data node: db_dist_remote_error_2 + Fetcher Type: COPY + Chunks: _dist_hyper_5_124_chunk, _dist_hyper_5_127_chunk, _dist_hyper_5_130_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[49, 50, 51]) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_3 (actual rows=4560 loops=1) + Output: metrics_dist_ss_3."time", metrics_dist_ss_3.device_id, metrics_dist_ss_3.v0, metrics_dist_ss_3.v1, metrics_dist_ss_3.v2, metrics_dist_ss_3.v3 + Data node: db_dist_remote_error_3 + Fetcher Type: COPY + Chunks: _dist_hyper_5_125_chunk, _dist_hyper_5_128_chunk, _dist_hyper_5_131_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[38, 39, 40]) +(21 rows) + +set timescaledb.remote_data_fetcher = 'cursor'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from metrics_dist_ss; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (AsyncAppend) (actual rows=22800 loops=1) + Output: metrics_dist_ss."time", metrics_dist_ss.device_id, metrics_dist_ss.v0, metrics_dist_ss.v1, metrics_dist_ss.v2, metrics_dist_ss.v3 + -> Append (actual rows=22800 loops=1) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_1 (actual rows=4560 loops=1) + Output: metrics_dist_ss_1."time", metrics_dist_ss_1.device_id, metrics_dist_ss_1.v0, metrics_dist_ss_1.v1, metrics_dist_ss_1.v2, metrics_dist_ss_1.v3 + Data node: db_dist_remote_error_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_5_123_chunk, _dist_hyper_5_126_chunk, _dist_hyper_5_129_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[57, 58, 59]) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_2 (actual rows=13680 loops=1) + Output: metrics_dist_ss_2."time", metrics_dist_ss_2.device_id, metrics_dist_ss_2.v0, metrics_dist_ss_2.v1, metrics_dist_ss_2.v2, metrics_dist_ss_2.v3 + Data node: db_dist_remote_error_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_5_124_chunk, _dist_hyper_5_127_chunk, _dist_hyper_5_130_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[49, 50, 51]) + -> Custom Scan (DataNodeScan) on public.metrics_dist_ss metrics_dist_ss_3 (actual rows=4560 loops=1) + Output: metrics_dist_ss_3."time", metrics_dist_ss_3.device_id, metrics_dist_ss_3.v0, metrics_dist_ss_3.v1, metrics_dist_ss_3.v2, metrics_dist_ss_3.v3 + Data node: db_dist_remote_error_3 + Fetcher Type: Cursor + Chunks: _dist_hyper_5_125_chunk, _dist_hyper_5_128_chunk, _dist_hyper_5_131_chunk + Remote SQL: SELECT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist_ss WHERE _timescaledb_functions.chunks_in(public.metrics_dist_ss.*, ARRAY[38, 39, 40]) +(21 rows) + +-- Incorrect int output, to cover the error handling in tuplefactory. 
+create table metrics_dist_io(like metrics_dist_remote_error); +alter table metrics_dist_io alter column v0 type io; +select table_name from create_distributed_hypertable('metrics_dist_io', + 'time', 'device_id'); + table_name +----------------- + metrics_dist_io +(1 row) + +-- Populate the table, using binary COPY to avoid the broken int4out. +set timescaledb.enable_connection_binary_data = true; +set timescaledb.dist_copy_transfer_format = 'binary'; +\copy metrics_dist_io from 'dist_remote_error.text' with (format text); +-- For testing, force the text format to exercise our broken output function. +set timescaledb.enable_connection_binary_data = false; +set timescaledb.dist_copy_transfer_format = 'text'; +set timescaledb.remote_data_fetcher = 'prepared'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from metrics_dist_io; +ERROR: invalid input syntax for type integer: "surprise" +set timescaledb.remote_data_fetcher = 'copy'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from metrics_dist_io; +ERROR: cannot use COPY fetcher because some of the column types do not have binary serialization +set timescaledb.remote_data_fetcher = 'cursor'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from metrics_dist_io; +ERROR: invalid input syntax for type integer: "surprise" +-- cleanup +\c :TEST_DBNAME :ROLE_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/jit-16.out b/tsl/test/expected/jit-16.out new file mode 100644 index 00000000000..516294a5496 --- /dev/null +++ b/tsl/test/expected/jit-16.out @@ -0,0 +1,223 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set TEST_BASE_NAME jit +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('include/%s_cleanup.sql', :'TEST_BASE_NAME') as "TEST_CLEANUP_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized results" --label "Optimized results" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +-- enable all jit optimizations +SET jit=on; +SET jit_above_cost=0; +SET jit_inline_above_cost=0; +SET jit_optimize_above_cost=0; +SET jit_tuple_deforming=on; +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license.
+CREATE TABLE jit_test(time timestamp NOT NULL, device int, temp float); +SELECT create_hypertable('jit_test', 'time'); +psql:include/jit_load.sql:6: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +----------------------- + (1,public,jit_test,t) +(1 row) + +ALTER TABLE jit_test DROP COLUMN device; +CREATE TABLE jit_test_interval(id int NOT NULL, temp float); +SELECT create_hypertable('jit_test_interval', 'id', chunk_time_interval => 10); + create_hypertable +-------------------------------- + (2,public,jit_test_interval,t) +(1 row) + +CREATE TABLE jit_test_contagg ( + observation_time TIMESTAMPTZ NOT NULL, + device_id TEXT NOT NULL, + metric DOUBLE PRECISION NOT NULL, + PRIMARY KEY(observation_time, device_id) +); +SELECT table_name FROM create_hypertable('jit_test_contagg', 'observation_time'); + table_name +------------------ + jit_test_contagg +(1 row) + +CREATE MATERIALIZED VIEW jit_device_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT + time_bucket('1 hour', observation_time) as bucket, + device_id, + avg(metric) as metric_avg, + max(metric)-min(metric) as metric_spread +FROM + jit_test_contagg +GROUP BY bucket, device_id WITH NO DATA; +INSERT INTO jit_test_contagg +SELECT ts, 'device_1', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +INSERT INTO jit_test_contagg +SELECT ts, 'device_2', (EXTRACT(EPOCH FROM ts)) from generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '30 minutes') ts; +CALL refresh_continuous_aggregate('jit_device_summary', NULL, NULL); +\set PREFIX 'EXPLAIN (VERBOSE, TIMING OFF, COSTS OFF, SUMMARY OFF)' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- github issue #1262 +-- +:PREFIX +INSERT INTO jit_test VALUES('2017-01-20T09:00:01', 22.5) RETURNING *; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) + Output: jit_test."time", jit_test.temp + -> Insert on public.jit_test + Output: jit_test."time", jit_test.temp + -> Custom Scan (ChunkDispatch) + Output: 'Fri Jan 20 09:00:01 2017'::timestamp without time zone, NULL::integer, '22.5'::double precision + -> Result + Output: 'Fri Jan 20 09:00:01 2017'::timestamp without time zone, NULL::integer, '22.5'::double precision +(8 rows) + +:PREFIX +INSERT INTO jit_test VALUES ('2017-01-20T09:00:02', 2), + ('2017-01-20T09:00:03', 5), + ('2017-01-20T09:00:04', 10); + QUERY PLAN +----------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on public.jit_test + -> Custom Scan (ChunkDispatch) + Output: "*VALUES*".column1, NULL::integer, "*VALUES*".column2 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1, NULL::integer, "*VALUES*".column2 +(6 rows) + +:PREFIX +SELECT * FROM jit_test WHERE temp > 5 and temp <= 10 ORDER BY time; + QUERY PLAN +-------------------------------- + Sort + Output: "time", temp + Sort Key: jit_test."time" + -> Result + Output: "time", temp + One-Time Filter: false +(6 rows) + +-- update with iteration over chunks +-- +:PREFIX +INSERT INTO jit_test_interval (SELECT x, x / 2.3 FROM generate_series(0, 100) x) RETURNING *; + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (HypertableModify) + Output: jit_test_interval.id, jit_test_interval.temp + -> Insert on public.jit_test_interval + Output: jit_test_interval.id, jit_test_interval.temp + -> Custom Scan (ChunkDispatch) + Output: x.x, ((((x.x)::numeric / 2.3))::double precision) + -> Function Scan on pg_catalog.generate_series x + Output: x.x, ((x.x)::numeric / 2.3) + Function Call: generate_series(0, 100) +(9 rows) + +:PREFIX +SELECT * FROM jit_test_interval WHERE id >= 23 and id < 73 ORDER BY id; + QUERY PLAN +---------------------------------- + Sort + Output: id, temp + Sort Key: jit_test_interval.id + -> Result + Output: id, temp + One-Time Filter: false +(6 rows) + +:PREFIX +UPDATE jit_test_interval SET temp = temp * 2.3 WHERE id >= 23 and id < 73; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) + -> Update on public.jit_test_interval + -> Index Scan using jit_test_interval_id_idx on public.jit_test_interval + Output: (temp * '2.3'::double precision), ctid + Index Cond: ((jit_test_interval.id >= 23) AND (jit_test_interval.id < 73)) +(5 rows) + +:PREFIX +SELECT * FROM jit_test_interval ORDER BY id; + QUERY PLAN +---------------------------------- + Sort + Output: id, temp + Sort Key: jit_test_interval.id + -> Result + Output: id, temp + One-Time Filter: false +(6 rows) + +:PREFIX +SELECT time_bucket(10, id), avg(temp) +FROM jit_test_interval +GROUP BY 1 +ORDER BY 1; + QUERY PLAN +----------------------------------------------------------- + GroupAggregate + Output: (time_bucket(10, id)), avg(temp) + Group Key: (time_bucket(10, jit_test_interval.id)) + -> Sort + Output: (time_bucket(10, id)), temp + Sort Key: (time_bucket(10, jit_test_interval.id)) + -> Result + Output: time_bucket(10, id), temp + One-Time Filter: false +(9 rows) + +-- test continuous aggregates usage with forced jit 
(based on continuous_aggs_usage.sql) +-- +:PREFIX +SELECT * FROM jit_device_summary WHERE metric_spread = 1800 ORDER BY bucket DESC, device_id LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: _materialized_hypertable_4.bucket, _materialized_hypertable_4.device_id, _materialized_hypertable_4.metric_avg, _materialized_hypertable_4.metric_spread + -> Sort + Output: _materialized_hypertable_4.bucket, _materialized_hypertable_4.device_id, _materialized_hypertable_4.metric_avg, _materialized_hypertable_4.metric_spread + Sort Key: _materialized_hypertable_4.bucket DESC, _materialized_hypertable_4.device_id + -> Append + -> Custom Scan (ChunkAppend) on _timescaledb_internal._materialized_hypertable_4 + Output: _materialized_hypertable_4.bucket, _materialized_hypertable_4.device_id, _materialized_hypertable_4.metric_avg, _materialized_hypertable_4.metric_spread + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_4_6_chunk__materialized_hypertable_4_bucket_idx on _timescaledb_internal._hyper_4_6_chunk + Output: _hyper_4_6_chunk.bucket, _hyper_4_6_chunk.device_id, _hyper_4_6_chunk.metric_avg, _hyper_4_6_chunk.metric_spread + Index Cond: (_hyper_4_6_chunk.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone)) + Filter: (_hyper_4_6_chunk.metric_spread = '1800'::double precision) + -> HashAggregate + Output: (time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time)), jit_test_contagg.device_id, avg(jit_test_contagg.metric), (max(jit_test_contagg.metric) - min(jit_test_contagg.metric)) + Group Key: time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time), jit_test_contagg.device_id + Filter: ((max(jit_test_contagg.metric) - min(jit_test_contagg.metric)) = '1800'::double precision) + -> Result + Output: time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time), jit_test_contagg.device_id, jit_test_contagg.metric + -> Custom Scan (ChunkAppend) on public.jit_test_contagg + Output: jit_test_contagg.observation_time, jit_test_contagg.device_id, jit_test_contagg.metric + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 4 + -> Index Scan using _hyper_3_5_chunk_jit_test_contagg_observation_time_idx on _timescaledb_internal._hyper_3_5_chunk + Output: _hyper_3_5_chunk.observation_time, _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric + Index Cond: (_hyper_3_5_chunk.observation_time >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(4)), '-infinity'::timestamp with time zone)) +(29 rows) + +-- generate the results into two different files +\set ECHO errors +--TEST END-- diff --git a/tsl/test/expected/merge_append_partially_compressed-16.out b/tsl/test/expected/merge_append_partially_compressed-16.out new file mode 100644 index 00000000000..1091b753905 --- /dev/null +++ b/tsl/test/expected/merge_append_partially_compressed-16.out @@ -0,0 +1,1425 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- this test checks the validity of the produced plans for partially compressed chunks +-- when injecting query_pathkeys on top of the append +-- path that combines the uncompressed and compressed parts of a chunk. +set timescaledb.enable_decompression_sorted_merge = off; +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float); +SELECT create_hypertable('ht_metrics_compressed','time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------------ + (1,public,ht_metrics_compressed,t) +(1 row) + +ALTER TABLE ht_metrics_compressed SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby='time'); +INSERT INTO ht_metrics_compressed +SELECT time, device, device * 0.1 +FROM generate_series('2020-01-02'::timestamptz,'2020-01-18'::timestamptz,'6 hour') time, +generate_series(1,3) device; +SELECT compress_chunk(c) FROM show_chunks('ht_metrics_compressed') c; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk +(3 rows) + +-- make them partially compressed +INSERT INTO ht_metrics_compressed +SELECT time, device, device * 0.1 +FROM generate_series('2020-01-02'::timestamptz,'2020-01-18'::timestamptz,'9 hour') time, +generate_series(1,3) device; +-- chunkAppend eligible queries (from tsbench) +-- sort is not pushed down +:PREFIX SELECT * FROM ht_metrics_compressed ORDER BY time DESC, device LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) + Order: ht_metrics_compressed."time" DESC, ht_metrics_compressed.device + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC, _hyper_1_3_chunk.device + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC, _hyper_1_3_chunk.device + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC, _hyper_1_3_chunk.device + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=18 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC, _hyper_1_2_chunk.device + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC, _hyper_1_2_chunk.device + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC, _hyper_1_2_chunk.device + -> Seq Scan on _hyper_1_2_chunk (never executed) + -> Merge Append (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC, _hyper_1_1_chunk.device + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC, _hyper_1_1_chunk.device + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Seq Scan on compress_hyper_2_4_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC, _hyper_1_1_chunk.device + -> Seq Scan on _hyper_1_1_chunk (never executed) +(32 rows) + +:PREFIX SELECT * FROM ht_metrics_compressed ORDER BY time_bucket('1d', time) DESC, device LIMIT 1; + QUERY 
PLAN +------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk."time")) DESC, _hyper_1_1_chunk.device + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk."time")) DESC, _hyper_1_1_chunk.device + Sort Method: top-N heapsort + -> Result (actual rows=81 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=81 loops=1) + -> Seq Scan on compress_hyper_2_4_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk."time")) DESC, _hyper_1_1_chunk.device + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk."time")) DESC, _hyper_1_2_chunk.device + Sort Method: top-N heapsort + -> Result (actual rows=84 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=84 loops=1) + -> Seq Scan on compress_hyper_2_5_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk."time")) DESC, _hyper_1_2_chunk.device + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_2_chunk (actual rows=57 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_3_chunk."time")) DESC, _hyper_1_3_chunk.device + Sort Method: top-N heapsort + -> Result (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_3_chunk."time")) DESC, _hyper_1_3_chunk.device + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=18 loops=1) +(33 rows) + +:PREFIX SELECT * FROM ht_metrics_compressed ORDER BY time desc limit 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=10 loops=1) + Order: ht_metrics_compressed."time" DESC + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: quicksort + -> Seq Scan on _hyper_1_3_chunk (actual rows=18 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> Seq Scan on _hyper_1_2_chunk (never executed) + -> Merge Append (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Seq Scan on compress_hyper_2_4_chunk (never executed) + -> Sort 
(never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Seq Scan on _hyper_1_1_chunk (never executed) +(32 rows) + +:PREFIX SELECT * FROM ht_metrics_compressed ORDER BY time_bucket('2d',time) DESC LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) + Order: time_bucket('@ 2 days'::interval, ht_metrics_compressed."time") DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_3_chunk."time")) DESC + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_3_chunk."time")) DESC + Sort Method: top-N heapsort + -> Result (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_3_chunk."time")) DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=18 loops=1) + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_2_chunk."time")) DESC + -> Sort (never executed) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_2_chunk."time")) DESC + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_2_chunk."time")) DESC + -> Seq Scan on _hyper_1_2_chunk (never executed) + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_1_chunk."time")) DESC + -> Sort (never executed) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_1_chunk."time")) DESC + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Seq Scan on compress_hyper_2_4_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 2 days'::interval, _hyper_1_1_chunk."time")) DESC + -> Seq Scan on _hyper_1_1_chunk (never executed) +(35 rows) + +:PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time DESC LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) + Order: ht_metrics_compressed."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_6_chunk (actual rows=3 loops=1) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=18 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Merge Append (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> 
Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Index Scan using compress_hyper_2_5_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_5_chunk (never executed) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Merge Append (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Index Scan using compress_hyper_2_4_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_4_chunk (never executed) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) +(41 rows) + +:PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time, device DESC LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) + Order: ht_metrics_compressed."time", ht_metrics_compressed.device DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device DESC + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=81 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Index Scan using compress_hyper_2_4_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_4_chunk (actual rows=3 loops=1) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Merge Append (never executed) + Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Index Scan using compress_hyper_2_5_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_5_chunk (never executed) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Merge Append (never executed) + Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_6_chunk (never executed) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort 
(never executed) + Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC + -> Seq Scan on _hyper_1_3_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) +(41 rows) + +-- index scan, no sort on top +:PREFIX SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time DESC LIMIT 1; -- index scan, no resorting required + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) + Order: ht_metrics_compressed."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1 loops=1) + Filter: (device = 3) + -> Index Scan Backward using compress_hyper_2_6_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_6_chunk (actual rows=1 loops=1) + Index Cond: (device = 3) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=6 loops=1) + Filter: (device = 3) + Rows Removed by Filter: 12 + -> Merge Append (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + Filter: (device = 3) + -> Index Scan Backward using compress_hyper_2_5_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_5_chunk (never executed) + Index Cond: (device = 3) + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" DESC + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: (device = 3) + -> Merge Append (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + Filter: (device = 3) + -> Index Scan Backward using compress_hyper_2_4_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_4_chunk (never executed) + Index Cond: (device = 3) + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (device = 3) +(35 rows) + +SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time DESC LIMIT 1; + time | device | value +------------------------------+--------+------- + Sat Jan 18 00:00:00 2020 PST | 3 | 0.3 +(1 row) + +:PREFIX SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY device, time DESC LIMIT 1; -- this uses the index and does not do sort on top + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1 loops=1) + Filter: (device = 3) + -> Index Scan Backward using compress_hyper_2_4_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_4_chunk (actual rows=1 loops=1) + Index Cond: (device = 3) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=18 loops=1) + Filter: (device = 3) + Rows Removed by Filter: 36 + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=1 loops=1) + Filter: (device = 3) + -> Index Scan Backward using 
compress_hyper_2_5_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_5_chunk (actual rows=1 loops=1) + Index Cond: (device = 3) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_2_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_2_chunk (actual rows=19 loops=1) + Filter: (device = 3) + Rows Removed by Filter: 38 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1 loops=1) + Filter: (device = 3) + -> Index Scan Backward using compress_hyper_2_6_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_6_chunk (actual rows=1 loops=1) + Index Cond: (device = 3) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=6 loops=1) + Filter: (device = 3) + Rows Removed by Filter: 12 +(33 rows) + +SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY device, time DESC LIMIT 1; + time | device | value +------------------------------+--------+------- + Sat Jan 18 00:00:00 2020 PST | 3 | 0.3 +(1 row) + +:PREFIX SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time, device DESC LIMIT 1; -- this also uses the index and does not do sort on top + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) + Order: ht_metrics_compressed."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1 loops=1) + Filter: (device = 3) + -> Index Scan using compress_hyper_2_4_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_4_chunk (actual rows=1 loops=1) + Index Cond: (device = 3) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=18 loops=1) + Filter: (device = 3) + Rows Removed by Filter: 36 + -> Merge Append (never executed) + Sort Key: _hyper_1_2_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + Filter: (device = 3) + -> Index Scan using compress_hyper_2_5_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_5_chunk (never executed) + Index Cond: (device = 3) + -> Sort (never executed) + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: (device = 3) + -> Merge Append (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Filter: (device = 3) + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_6_chunk (never executed) + Index Cond: (device = 3) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Seq Scan on _hyper_1_3_chunk (never executed) + Filter: (device = 3) +(35 rows) + +SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time, device DESC LIMIT 1; + time | device | value +------------------------------+--------+------- + Thu Jan 02 00:00:00 2020 PST | 3 | 0.3 +(1 row) + +-- not eligible for chunkAppend, but eligible for sort pushdown +:PREFIX SELECT * FROM ht_metrics_compressed ORDER BY device, time DESC LIMIT 1; -- with pushdown + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- 
+ Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_4_chunk.device, compress_hyper_2_4_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_4_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_5_chunk.device, compress_hyper_2_5_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_5_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_2_chunk.device, _hyper_1_2_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_2_chunk (actual rows=57 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_6_chunk.device, compress_hyper_2_6_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk.device, _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=18 loops=1) +(30 rows) + +:PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY device, time DESC LIMIT 1; -- with pushdown + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_4_chunk.device, compress_hyper_2_4_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using compress_hyper_2_4_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_4_chunk (actual rows=3 loops=1) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=1 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_5_chunk.device, compress_hyper_2_5_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using compress_hyper_2_5_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_5_chunk (actual rows=3 loops=1) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_2_chunk.device, _hyper_1_2_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_2_chunk (actual rows=57 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1 
loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_6_chunk.device, compress_hyper_2_6_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device__ts_me on compress_hyper_2_6_chunk (actual rows=3 loops=1) + Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_3_chunk.device, _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_3_chunk (actual rows=18 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) +(39 rows) + +CREATE TABLE test1 ( +time timestamptz NOT NULL, + x1 integer, + x2 integer, + x3 integer, + x4 integer, + x5 integer); +SELECT FROM create_hypertable('test1', 'time'); +-- +(1 row) + +ALTER TABLE test1 SET (timescaledb.compress, timescaledb.compress_segmentby='x1, x2, x5', timescaledb.compress_orderby = 'time DESC, x3 ASC, x4 ASC'); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:00:00-00', 1, 2, 1, 1, 0); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 01:00:00-00', 1, 3, 2, 2, 0); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:00:00-00', 2, 1, 3, 3, 0); +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 03:00:00-00', 1, 2, 4, 4, 0); +SELECT compress_chunk(i) FROM show_chunks('test1') i; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_3_7_chunk +(1 row) + +ANALYZE test1; +-- make all the chunks partially compressed +INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:01:00-00', 10, 20, 30, 40 ,50); +-- tests that require resorting (pushdown below decompressChunk node cannot happen) +-- requires resorting, no pushdown can happen +:PREFIX +SELECT * FROM test1 ORDER BY time DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time" DESC + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY time DESC LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time" DESC + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(14 rows) + +-- requires resorting +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST; + QUERY PLAN 
+--------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time" DESC, test1.x3 + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +-- all these require resorting, no pushdown can happen +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 ASC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time" DESC, test1.x3, test1.x4 + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 DESC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time" DESC, test1.x3, test1.x4 DESC + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 DESC + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time" + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time" + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time" + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST; + QUERY PLAN 
+--------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time", test1.x3 DESC + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST, x4 DESC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time", test1.x3 DESC, test1.x4 DESC + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC, _hyper_3_7_chunk.x4 DESC + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC, _hyper_3_7_chunk.x4 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC, _hyper_3_7_chunk.x4 DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time" NULLS FIRST, test1.x3 DESC NULLS LAST, test1.x4 + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time" NULLS FIRST, _hyper_3_7_chunk.x3 DESC NULLS LAST, _hyper_3_7_chunk.x4 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time" NULLS FIRST, _hyper_3_7_chunk.x3 DESC NULLS LAST, _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time" NULLS FIRST, _hyper_3_7_chunk.x3 DESC NULLS LAST, _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(13 rows) + +:PREFIX +SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: test1."time" + Sort Method: quicksort + -> Finalize HashAggregate (actual rows=5 loops=1) + Group Key: test1.x1, test1.x2, test1."time" + Batches: 1 + -> Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1) + Order: test1."time", test1.x1, test1.x2 + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, 
_hyper_3_7_chunk.x2 + Sort Method: quicksort + -> Partial HashAggregate (actual rows=4 loops=1) + Group Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk."time" + Batches: 1 + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2 + Sort Method: quicksort + -> Partial HashAggregate (actual rows=1 loops=1) + Group Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk."time" + Batches: 1 + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(26 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk.x4, _hyper_3_7_chunk."time" + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk.x4, _hyper_3_7_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk.x4, _hyper_3_7_chunk."time" + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time, x4 LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x4 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time, x3 LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 + Sort 
Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time, x3, x4 LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time, x4 DESC LIMIT 10; -- no pushdown because orderby does not match + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------ + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x4 DESC + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x4 DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x4 DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +-- queries with pushdown +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_4_8_chunk.x1, compress_hyper_4_8_chunk.x2, compress_hyper_4_8_chunk.x5, compress_hyper_4_8_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time" + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time DESC, x3 ASC, x4 ASC LIMIT 10; -- pushdown + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual 
rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_4_8_chunk.x1, compress_hyper_4_8_chunk.x2, compress_hyper_4_8_chunk.x5, compress_hyper_4_8_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time" DESC, _hyper_3_7_chunk.x3, _hyper_3_7_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time ASC, x3 DESC, x4 DESC LIMIT 10; -- pushdown + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC, _hyper_3_7_chunk.x4 DESC + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_4_8_chunk.x1, compress_hyper_4_8_chunk.x2, compress_hyper_4_8_chunk.x5, compress_hyper_4_8_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC, _hyper_3_7_chunk.x4 DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +:PREFIX +SELECT * FROM test1 ORDER BY x1, x2, x5, time, x3 DESC LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_4_8_chunk.x1, compress_hyper_4_8_chunk.x2, compress_hyper_4_8_chunk.x5, compress_hyper_4_8_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk.x5, _hyper_3_7_chunk."time", _hyper_3_7_chunk.x3 DESC + Sort Method: quicksort + -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) +(12 rows) + +--------------------------------------------------------------------------- +-- test queries without ordered append, but still eligible for sort pushdown +--------------------------------------------------------------------------- +CREATE TABLE test2 ( +time timestamptz NOT NULL, + x1 integer, + x2 integer, + x3 integer, + x4 integer, + x5 integer); +SELECT FROM create_hypertable('test2', 'time'); +-- +(1 row) + +ALTER TABLE test2 SET (timescaledb.compress, timescaledb.compress_segmentby='x1, x2, x5', 
timescaledb.compress_orderby = 'x3 ASC, x4 ASC'); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:00:00-00', 1, 2, 1, 1, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 01:00:00-00', 1, 3, 2, 2, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:00:00-00', 2, 1, 3, 3, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 03:00:00-00', 1, 2, 4, 4, 0); +-- chunk 2 +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-10 00:00:00-00', 1, 2, 1, 1, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-10 01:00:00-00', 1, 3, 2, 2, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-10 02:00:00-00', 2, 1, 3, 3, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-10 03:00:00-00', 1, 2, 4, 4, 0); +SELECT compress_chunk(i) FROM show_chunks('test2') i; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_5_9_chunk + _timescaledb_internal._hyper_5_10_chunk +(2 rows) + +-- make them partially compressed +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:02:01-00', 1, 2, 1, 1, 0); +INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-10 00:02:01-00', 1, 2, 1, 1, 0); +set enable_indexscan = off; +-- queries where sort is pushed down +:PREFIX SELECT * FROM test2 ORDER BY x1, x2, x5, x3; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk.x3 + -> Custom Scan (DecompressChunk) on _hyper_5_9_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_6_11_chunk.x1, compress_hyper_6_11_chunk.x2, compress_hyper_6_11_chunk.x5, compress_hyper_6_11_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_6_11_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_5_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_6_12_chunk.x1, compress_hyper_6_12_chunk.x2, compress_hyper_6_12_chunk.x5, compress_hyper_6_12_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_6_12_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x5, _hyper_5_10_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_5_10_chunk (actual rows=1 loops=1) +(20 rows) + +SELECT * FROM test2 ORDER BY x1, x2, x5, x3; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +:PREFIX SELECT * FROM test2 ORDER BY x1, x2, x5, 
x3, x4; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk.x3, _hyper_5_9_chunk.x4 + -> Custom Scan (DecompressChunk) on _hyper_5_9_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_6_11_chunk.x1, compress_hyper_6_11_chunk.x2, compress_hyper_6_11_chunk.x5, compress_hyper_6_11_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_6_11_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk.x3, _hyper_5_9_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_5_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=4 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_6_12_chunk.x1, compress_hyper_6_12_chunk.x2, compress_hyper_6_12_chunk.x5, compress_hyper_6_12_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_6_12_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x5, _hyper_5_10_chunk.x3, _hyper_5_10_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_5_10_chunk (actual rows=1 loops=1) +(20 rows) + +SELECT * FROM test2 ORDER BY x1, x2, x5, x3, x4; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +-- queries where sort is not pushed down +:PREFIX SELECT * FROM test2 ORDER BY x1, x2, x3; + QUERY PLAN +---------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x3 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_5_9_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_6_11_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_5_9_chunk (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_6_12_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_5_10_chunk (actual rows=1 loops=1) +(20 rows) + +SELECT * FROM test2 ORDER BY x1, x2, x3; + time | x1 | x2 | x3 | x4 | x5 
+------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +:PREFIX SELECT * FROM test2 ORDER BY x1, x2, x5, x4; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk.x4 + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_5_9_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_6_11_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_5_9_chunk (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x5, _hyper_5_10_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_6_12_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x5, _hyper_5_10_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_5_10_chunk (actual rows=1 loops=1) +(20 rows) + +SELECT * FROM test2 ORDER BY x1, x2, x5, x4; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +:PREFIX SELECT * FROM test2 ORDER BY x1, x2, x5, time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk."time" + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_5_9_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_6_11_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_9_chunk.x1, _hyper_5_9_chunk.x2, _hyper_5_9_chunk.x5, _hyper_5_9_chunk."time" + Sort Method: quicksort + -> Seq Scan on _hyper_5_9_chunk (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x5, 
_hyper_5_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_6_12_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_5_10_chunk.x1, _hyper_5_10_chunk.x2, _hyper_5_10_chunk.x5, _hyper_5_10_chunk."time" + Sort Method: quicksort + -> Seq Scan on _hyper_5_10_chunk (actual rows=1 loops=1) +(20 rows) + +SELECT * FROM test2 ORDER BY x1, x2, x5, time; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +----------------------------- +-- tests with space partitioning +----------------------------- +CREATE TABLE test3 ( +time timestamptz NOT NULL, + x1 integer, + x2 integer, + x3 integer, + x4 integer, + x5 integer); +SELECT FROM create_hypertable('test3', 'time'); +-- +(1 row) + +SELECT add_dimension('test3', 'x1', number_partitions => 2); + add_dimension +----------------------- + (5,public,test3,x1,t) +(1 row) + +ALTER TABLE test3 SET (timescaledb.compress, timescaledb.compress_segmentby='x1, x2, x5', timescaledb.compress_orderby = 'x3 ASC, x4 ASC'); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:00:00-00', 1, 2, 1, 1, 0); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-01 01:00:00-00', 1, 3, 2, 2, 0); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:00:00-00', 2, 1, 3, 3, 0); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-01 03:00:00-00', 1, 2, 4, 4, 0); +-- chunk 2 +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-10 00:00:00-00', 1, 2, 1, 1, 0); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-10 01:00:00-00', 1, 3, 2, 2, 0); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-10 02:00:00-00', 2, 1, 3, 3, 0); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-10 03:00:00-00', 1, 2, 4, 4, 0); +SELECT compress_chunk(i) FROM show_chunks('test3') i; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_7_13_chunk + _timescaledb_internal._hyper_7_14_chunk + _timescaledb_internal._hyper_7_15_chunk + _timescaledb_internal._hyper_7_16_chunk +(4 rows) + +-- make them partially compressed +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:02:01-00', 1, 2, 1, 1, 0); +INSERT INTO test3 (time, x1, x2, x3, x4, x5) values('2000-01-10 00:02:01-00', 1, 2, 1, 1, 0); +set enable_indexscan = off; +-- queries where sort is pushed down +:PREFIX SELECT * FROM test3 ORDER BY x1, x2, x5, x3; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk.x3 + -> Custom Scan (DecompressChunk) on _hyper_7_13_chunk (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: compress_hyper_8_17_chunk.x1, 
compress_hyper_8_17_chunk.x2, compress_hyper_8_17_chunk.x5, compress_hyper_8_17_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_17_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_7_13_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_7_14_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_8_18_chunk.x1, compress_hyper_8_18_chunk.x2, compress_hyper_8_18_chunk.x5, compress_hyper_8_18_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_18_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_7_15_chunk (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: compress_hyper_8_19_chunk.x1, compress_hyper_8_19_chunk.x2, compress_hyper_8_19_chunk.x5, compress_hyper_8_19_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_19_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x5, _hyper_7_15_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_7_15_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_7_16_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_8_20_chunk.x1, compress_hyper_8_20_chunk.x2, compress_hyper_8_20_chunk.x5, compress_hyper_8_20_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_20_chunk (actual rows=1 loops=1) +(30 rows) + +SELECT * FROM test3 ORDER BY x1, x2, x5, x3; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +:PREFIX SELECT * FROM test3 ORDER BY x1, x2, x5, x3, x4; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk.x3, _hyper_7_13_chunk.x4 + -> Custom Scan (DecompressChunk) on _hyper_7_13_chunk (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: compress_hyper_8_17_chunk.x1, compress_hyper_8_17_chunk.x2, compress_hyper_8_17_chunk.x5, compress_hyper_8_17_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_17_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk.x3, _hyper_7_13_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_7_13_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_7_14_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_8_18_chunk.x1, compress_hyper_8_18_chunk.x2, 
compress_hyper_8_18_chunk.x5, compress_hyper_8_18_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_18_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_7_15_chunk (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: compress_hyper_8_19_chunk.x1, compress_hyper_8_19_chunk.x2, compress_hyper_8_19_chunk.x5, compress_hyper_8_19_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_19_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x5, _hyper_7_15_chunk.x3, _hyper_7_15_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_7_15_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_7_16_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_8_20_chunk.x1, compress_hyper_8_20_chunk.x2, compress_hyper_8_20_chunk.x5, compress_hyper_8_20_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_8_20_chunk (actual rows=1 loops=1) +(30 rows) + +SELECT * FROM test3 ORDER BY x1, x2, x5, x3, x4; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +-- queries where sort is not pushed down +:PREFIX SELECT * FROM test3 ORDER BY x1, x2, x3; + QUERY PLAN +---------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x3 + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_13_chunk (actual rows=3 loops=1) + -> Seq Scan on compress_hyper_8_17_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_7_13_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_14_chunk.x1, _hyper_7_14_chunk.x2, _hyper_7_14_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_14_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_8_18_chunk (actual rows=1 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_15_chunk (actual rows=3 loops=1) + -> Seq Scan on compress_hyper_8_19_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x3 + Sort Method: quicksort + -> Seq Scan on _hyper_7_15_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_16_chunk.x1, _hyper_7_16_chunk.x2, _hyper_7_16_chunk.x3 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_16_chunk (actual rows=1 
loops=1) + -> Seq Scan on compress_hyper_8_20_chunk (actual rows=1 loops=1) +(30 rows) + +SELECT * FROM test3 ORDER BY x1, x2, x3; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +:PREFIX SELECT * FROM test3 ORDER BY x1, x2, x5, x4; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk.x4 + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_13_chunk (actual rows=3 loops=1) + -> Seq Scan on compress_hyper_8_17_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_7_13_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_14_chunk.x1, _hyper_7_14_chunk.x2, _hyper_7_14_chunk.x5, _hyper_7_14_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_14_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_8_18_chunk (actual rows=1 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x5, _hyper_7_15_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_15_chunk (actual rows=3 loops=1) + -> Seq Scan on compress_hyper_8_19_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x5, _hyper_7_15_chunk.x4 + Sort Method: quicksort + -> Seq Scan on _hyper_7_15_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_16_chunk.x1, _hyper_7_16_chunk.x2, _hyper_7_16_chunk.x5, _hyper_7_16_chunk.x4 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_16_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_8_20_chunk (actual rows=1 loops=1) +(30 rows) + +SELECT * FROM test3 ORDER BY x1, x2, x5, x4; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + +:PREFIX SELECT * FROM test3 ORDER BY x1, x2, x5, time; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_13_chunk (actual rows=3 loops=1) + -> Seq Scan on compress_hyper_8_17_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_13_chunk.x1, _hyper_7_13_chunk.x2, _hyper_7_13_chunk.x5, _hyper_7_13_chunk."time" + Sort Method: quicksort + -> Seq Scan on _hyper_7_13_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_14_chunk.x1, _hyper_7_14_chunk.x2, _hyper_7_14_chunk.x5, _hyper_7_14_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_14_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_8_18_chunk (actual rows=1 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x5, _hyper_7_15_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_15_chunk (actual rows=3 loops=1) + -> Seq Scan on compress_hyper_8_19_chunk (actual rows=2 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_15_chunk.x1, _hyper_7_15_chunk.x2, _hyper_7_15_chunk.x5, _hyper_7_15_chunk."time" + Sort Method: quicksort + -> Seq Scan on _hyper_7_15_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_7_16_chunk.x1, _hyper_7_16_chunk.x2, _hyper_7_16_chunk.x5, _hyper_7_16_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_7_16_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_8_20_chunk (actual rows=1 loops=1) +(30 rows) + +SELECT * FROM test3 ORDER BY x1, x2, x5, time; + time | x1 | x2 | x3 | x4 | x5 +------------------------------+----+----+----+----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 16:02:01 1999 PST | 1 | 2 | 1 | 1 | 0 + Fri Dec 31 19:00:00 1999 PST | 1 | 2 | 4 | 4 | 0 + Sun Jan 09 16:00:00 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 16:02:01 2000 PST | 1 | 2 | 1 | 1 | 0 + Sun Jan 09 19:00:00 2000 PST | 1 | 2 | 4 | 4 | 0 + Fri Dec 31 17:00:00 1999 PST | 1 | 3 | 2 | 2 | 0 + Sun Jan 09 17:00:00 2000 PST | 1 | 3 | 2 | 2 | 0 + Fri Dec 31 18:00:00 1999 PST | 2 | 1 | 3 | 3 | 0 + Sun Jan 09 18:00:00 2000 PST | 2 | 1 | 3 | 3 | 0 +(10 rows) + diff --git a/tsl/test/expected/modify_exclusion-16.out b/tsl/test/expected/modify_exclusion-16.out new file mode 100644 index 00000000000..ded42f5e288 --- /dev/null +++ b/tsl/test/expected/modify_exclusion-16.out @@ -0,0 +1,1986 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set PREFIX 'EXPLAIN (ANALYZE,VERBOSE,SUMMARY OFF,TIMING OFF,COSTS OFF)' +\i include/modify_exclusion_load.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE TABLE metrics_int2(c1 int,c2 int, c3 int, c4 int, c5 int, time int2 NOT NULL, value float, data text); +CREATE TABLE metrics_int4(c1 int,c2 int, c3 int, c4 int, c5 int, time int4 NOT NULL, value float, data text); +CREATE TABLE metrics_int8(c1 int,c2 int, c3 int, c4 int, c5 int, time int8 NOT NULL, value float, data text); +CREATE TABLE metrics_date(c1 int,c2 int, c3 int, c4 int, c5 int, time date NOT NULL, value float, data text); +CREATE TABLE metrics_timestamp(c1 int,c2 int, c3 int, c4 int, c5 int, time timestamp NOT NULL, value float, data text); +CREATE TABLE metrics_timestamptz(c1 int,c2 int, c3 int, c4 int, c5 int, time timestamptz NOT NULL, value float, data text); +CREATE TABLE metrics_space(c1 int,c2 int, c3 int, c4 int, c5 int, time timestamp NOT NULL, device text, value float, data text); +SELECT table_name FROM create_hypertable('metrics_int2','time',chunk_time_interval:=10); + table_name +-------------- + metrics_int2 +(1 row) + +SELECT table_name FROM create_hypertable('metrics_int4','time',chunk_time_interval:=10); + table_name +-------------- + metrics_int4 +(1 row) + +SELECT table_name FROM create_hypertable('metrics_int8','time',chunk_time_interval:=10); + table_name +-------------- + metrics_int8 +(1 row) + +SELECT table_name FROM create_hypertable('metrics_date','time'); + table_name +-------------- + metrics_date +(1 row) + +SELECT table_name FROM create_hypertable('metrics_timestamp','time'); +psql:include/modify_exclusion_load.sql:17: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + table_name +------------------- + metrics_timestamp +(1 row) + +SELECT table_name FROM create_hypertable('metrics_timestamptz','time'); + table_name +--------------------- + metrics_timestamptz +(1 row) + +SELECT table_name FROM create_hypertable('metrics_space','time','device',4); +psql:include/modify_exclusion_load.sql:19: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + table_name +--------------- + metrics_space +(1 row) + +CREATE FUNCTION drop_column(text) RETURNS VOID LANGUAGE PLPGSQL AS $$ +DECLARE + tbl name; +BEGIN + FOR tbl IN SELECT 'metrics_' || unnest(ARRAY['int2','int4','int8','date','timestamp','timestamptz','space']) + LOOP + EXECUTE format('ALTER TABLE %I DROP COLUMN %I;', tbl, $1); + END LOOP; +END; +$$; +-- create 4 chunks each with different physical layout +SELECT drop_column('c1'); + drop_column +------------- + +(1 row) + +INSERT INTO metrics_int2(time) VALUES (0); +INSERT INTO metrics_int4(time) VALUES (0); +INSERT INTO metrics_int8(time) VALUES (0); +INSERT INTO metrics_date(time) VALUES ('2000-01-01'); +INSERT INTO metrics_timestamp(time) VALUES ('2000-01-01'); +INSERT INTO metrics_timestamptz(time) VALUES ('2000-01-01'); +INSERT INTO metrics_space(time,device) VALUES ('2000-01-01','1'),('2000-01-01','2'); +SELECT drop_column('c2'); + drop_column +------------- + +(1 row) + +INSERT INTO metrics_int2(time) VALUES (10); +INSERT INTO metrics_int4(time) VALUES (10); +INSERT INTO metrics_int8(time) VALUES (10); +INSERT INTO metrics_date(time) VALUES ('2001-01-01'); +INSERT INTO metrics_timestamp(time) VALUES ('2001-01-01'); +INSERT INTO metrics_timestamptz(time) VALUES ('2001-01-01'); +INSERT INTO metrics_space(time,device) VALUES ('2001-01-01','1'),('2001-01-01','2'); +SELECT drop_column('c3'); + drop_column +------------- + +(1 row) + +INSERT INTO metrics_int2(time) VALUES (20); +INSERT INTO metrics_int4(time) VALUES (20); +INSERT INTO metrics_int8(time) 
VALUES (20); +INSERT INTO metrics_date(time) VALUES ('2002-01-01'); +INSERT INTO metrics_timestamp(time) VALUES ('2002-01-01'); +INSERT INTO metrics_timestamptz(time) VALUES ('2002-01-01'); +INSERT INTO metrics_space(time,device) VALUES ('2002-01-01','1'),('2002-01-01','2'); +SELECT drop_column('c4'); + drop_column +------------- + +(1 row) + +INSERT INTO metrics_int2(time) VALUES (30); +INSERT INTO metrics_int4(time) VALUES (30); +INSERT INTO metrics_int8(time) VALUES (30); +INSERT INTO metrics_date(time) VALUES ('2003-01-01'); +INSERT INTO metrics_timestamp(time) VALUES ('2003-01-01'); +INSERT INTO metrics_timestamptz(time) VALUES ('2003-01-01'); +INSERT INTO metrics_space(time,device) VALUES ('2003-01-01','1'),('2003-01-01','2'); +SELECT drop_column('c5'); + drop_column +------------- + +(1 row) + +CREATE TABLE metrics_compressed(time timestamptz NOT NULL, device int, value float); +SELECT table_name FROM create_hypertable('metrics_compressed','time'); + table_name +-------------------- + metrics_compressed +(1 row) + +ALTER TABLE metrics_compressed SET (timescaledb.compress); +-- create first chunk and compress +INSERT INTO metrics_compressed VALUES ('2000-01-01',1,0.5); +SELECT count(compress_chunk(chunk)) FROM show_chunks('metrics_compressed') chunk; + count +------- + 1 +(1 row) + +-- create more chunks +INSERT INTO metrics_compressed VALUES ('2010-01-01',1,0.5),('2011-01-01',1,0.5),('2012-01-01',1,0.5); +-- immutable constraints +-- should not have ChunkAppend since constraint is immutable and postgres already does the exclusion +-- should only hit 1 chunk and base table +BEGIN; +:PREFIX DELETE FROM metrics_int2 WHERE time = 15; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int2 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_1_9_chunk metrics_int2_1 + -> Index Scan using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk metrics_int2_1 (actual rows=0 loops=1) + Output: metrics_int2_1.tableoid, metrics_int2_1.ctid + Index Cond: (metrics_int2_1."time" = 15) +(6 rows) + +:PREFIX DELETE FROM metrics_int4 WHERE time = 15; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_10_chunk metrics_int4_1 + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk metrics_int4_1 (actual rows=0 loops=1) + Output: metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" = 15) +(6 rows) + +:PREFIX DELETE FROM metrics_int8 WHERE time = 15; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int8 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_3_11_chunk metrics_int8_1 + -> Index Scan using _hyper_3_11_chunk_metrics_int8_time_idx on _timescaledb_internal._hyper_3_11_chunk metrics_int8_1 (actual rows=0 loops=1) + Output: metrics_int8_1.tableoid, metrics_int8_1.ctid + Index Cond: 
(metrics_int8_1."time" = 15) +(6 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int2 SET value = 0.5 WHERE time = 15; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int2 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_1_9_chunk metrics_int2_1 + -> Result (actual rows=0 loops=1) + Output: '0.5'::double precision, metrics_int2_1.tableoid, metrics_int2_1.ctid + -> Index Scan using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk metrics_int2_1 (actual rows=0 loops=1) + Output: metrics_int2_1.tableoid, metrics_int2_1.ctid + Index Cond: (metrics_int2_1."time" = 15) +(8 rows) + +:PREFIX UPDATE metrics_int4 SET value = 0.5 WHERE time = 15; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_10_chunk metrics_int4_1 + -> Result (actual rows=0 loops=1) + Output: '0.5'::double precision, metrics_int4_1.tableoid, metrics_int4_1.ctid + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk metrics_int4_1 (actual rows=0 loops=1) + Output: metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" = 15) +(8 rows) + +:PREFIX UPDATE metrics_int8 SET value = 0.5 WHERE time = 15; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int8 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_3_11_chunk metrics_int8_1 + -> Result (actual rows=0 loops=1) + Output: '0.5'::double precision, metrics_int8_1.tableoid, metrics_int8_1.ctid + -> Index Scan using _hyper_3_11_chunk_metrics_int8_time_idx on _timescaledb_internal._hyper_3_11_chunk metrics_int8_1 (actual rows=0 loops=1) + Output: metrics_int8_1.tableoid, metrics_int8_1.ctid + Index Cond: (metrics_int8_1."time" = 15) +(8 rows) + +ROLLBACK; +-- stable constraints +-- should have ChunkAppend since constraint is stable +-- should only hit 1 chunk and base table +BEGIN; +:PREFIX DELETE FROM metrics_int2 WHERE time = length(substring(version(),1,23)); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int2 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_1_1_chunk metrics_int2 + Delete on _timescaledb_internal._hyper_1_9_chunk metrics_int2 + Delete on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 + Delete on _timescaledb_internal._hyper_1_25_chunk metrics_int2 + -> Custom Scan (ChunkAppend) on public.metrics_int2 (actual rows=0 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 (actual rows=0 loops=1) + Output: metrics_int2_1.tableoid, 
metrics_int2_1.ctid + Index Cond: (metrics_int2_1."time" = length("substring"(version(), 1, 23))) +(13 rows) + +:PREFIX DELETE FROM metrics_int4 WHERE time = length(substring(version(),1,23)); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk metrics_int4 + Delete on _timescaledb_internal._hyper_2_10_chunk metrics_int4 + Delete on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 + Delete on _timescaledb_internal._hyper_2_26_chunk metrics_int4 + -> Custom Scan (ChunkAppend) on public.metrics_int4 (actual rows=0 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 (actual rows=0 loops=1) + Output: metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" = length("substring"(version(), 1, 23))) +(13 rows) + +:PREFIX DELETE FROM metrics_int8 WHERE time = length(substring(version(),1,23)); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int8 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_3_3_chunk metrics_int8 + Delete on _timescaledb_internal._hyper_3_11_chunk metrics_int8 + Delete on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 + Delete on _timescaledb_internal._hyper_3_27_chunk metrics_int8 + -> Custom Scan (ChunkAppend) on public.metrics_int8 (actual rows=0 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_3_19_chunk_metrics_int8_time_idx on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 (actual rows=0 loops=1) + Output: metrics_int8_1.tableoid, metrics_int8_1.ctid + Index Cond: (metrics_int8_1."time" = length("substring"(version(), 1, 23))) +(13 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int2 SET value = 0.3 WHERE time = length(substring(version(),1,23)); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int2 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_1_1_chunk metrics_int2 + Update on _timescaledb_internal._hyper_1_9_chunk metrics_int2 + Update on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 + Update on _timescaledb_internal._hyper_1_25_chunk metrics_int2 + -> Result (actual rows=0 loops=1) + Output: '0.3'::double precision, metrics_int2.tableoid, metrics_int2.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int2 (actual rows=0 loops=1) + Output: metrics_int2.tableoid, metrics_int2.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 (actual rows=0 loops=1) + Output: metrics_int2_1.tableoid, metrics_int2_1.ctid + Index Cond: (metrics_int2_1."time" = length("substring"(version(), 
1, 23))) +(16 rows) + +:PREFIX UPDATE metrics_int4 SET value = 0.3 WHERE time = length(substring(version(),1,23)); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk metrics_int4 + Update on _timescaledb_internal._hyper_2_10_chunk metrics_int4 + Update on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 + Update on _timescaledb_internal._hyper_2_26_chunk metrics_int4 + -> Result (actual rows=0 loops=1) + Output: '0.3'::double precision, metrics_int4.tableoid, metrics_int4.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int4 (actual rows=0 loops=1) + Output: metrics_int4.tableoid, metrics_int4.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 (actual rows=0 loops=1) + Output: metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" = length("substring"(version(), 1, 23))) +(16 rows) + +:PREFIX UPDATE metrics_int8 SET value = 0.3 WHERE time = length(substring(version(),1,23)); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int8 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_3_3_chunk metrics_int8 + Update on _timescaledb_internal._hyper_3_11_chunk metrics_int8 + Update on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 + Update on _timescaledb_internal._hyper_3_27_chunk metrics_int8 + -> Result (actual rows=0 loops=1) + Output: '0.3'::double precision, metrics_int8.tableoid, metrics_int8.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int8 (actual rows=0 loops=1) + Output: metrics_int8.tableoid, metrics_int8.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_3_19_chunk_metrics_int8_time_idx on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 (actual rows=0 loops=1) + Output: metrics_int8_1.tableoid, metrics_int8_1.ctid + Index Cond: (metrics_int8_1."time" = length("substring"(version(), 1, 23))) +(16 rows) + +ROLLBACK; +-- should have ChunkAppend since constraint is stable +-- should only hit 1 chunk and base table, toplevel rows should be 1 +BEGIN; +:PREFIX DELETE FROM metrics_int2 WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Delete on public.metrics_int2 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Delete on _timescaledb_internal._hyper_1_1_chunk metrics_int2 + Delete on _timescaledb_internal._hyper_1_9_chunk metrics_int2 + Delete on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 + Delete on _timescaledb_internal._hyper_1_25_chunk metrics_int2 + -> Custom Scan (ChunkAppend) on public.metrics_int2 (actual rows=1 loops=1) + Startup Exclusion: true + Runtime 
Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.tableoid, metrics_int2_1.ctid + Index Cond: (metrics_int2_1."time" = length("substring"(version(), 1, 20))) +(15 rows) + +:PREFIX DELETE FROM metrics_int4 WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Delete on public.metrics_int4 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Delete on _timescaledb_internal._hyper_2_2_chunk metrics_int4 + Delete on _timescaledb_internal._hyper_2_10_chunk metrics_int4 + Delete on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 + Delete on _timescaledb_internal._hyper_2_26_chunk metrics_int4 + -> Custom Scan (ChunkAppend) on public.metrics_int4 (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" = length("substring"(version(), 1, 20))) +(15 rows) + +:PREFIX DELETE FROM metrics_int8 WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Delete on public.metrics_int8 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Delete on _timescaledb_internal._hyper_3_3_chunk metrics_int8 + Delete on _timescaledb_internal._hyper_3_11_chunk metrics_int8 + Delete on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 + Delete on _timescaledb_internal._hyper_3_27_chunk metrics_int8 + -> Custom Scan (ChunkAppend) on public.metrics_int8 (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_3_19_chunk_metrics_int8_time_idx on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 (actual rows=1 loops=1) + Output: metrics_int8_1.tableoid, metrics_int8_1.ctid + Index Cond: (metrics_int8_1."time" = length("substring"(version(), 1, 20))) +(15 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int2 SET value = 0.4 WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Update on public.metrics_int2 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Update on _timescaledb_internal._hyper_1_1_chunk metrics_int2 + Update on _timescaledb_internal._hyper_1_9_chunk metrics_int2 + Update on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 + Update on _timescaledb_internal._hyper_1_25_chunk metrics_int2 + -> Result (actual rows=1 loops=1) + Output: '0.4'::double 
precision, metrics_int2.tableoid, metrics_int2.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int2 (actual rows=1 loops=1) + Output: metrics_int2.tableoid, metrics_int2.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.tableoid, metrics_int2_1.ctid + Index Cond: (metrics_int2_1."time" = length("substring"(version(), 1, 20))) +(18 rows) + +:PREFIX UPDATE metrics_int4 SET value = 0.4 WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Update on public.metrics_int4 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Update on _timescaledb_internal._hyper_2_2_chunk metrics_int4 + Update on _timescaledb_internal._hyper_2_10_chunk metrics_int4 + Update on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 + Update on _timescaledb_internal._hyper_2_26_chunk metrics_int4 + -> Result (actual rows=1 loops=1) + Output: '0.4'::double precision, metrics_int4.tableoid, metrics_int4.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int4 (actual rows=1 loops=1) + Output: metrics_int4.tableoid, metrics_int4.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" = length("substring"(version(), 1, 20))) +(18 rows) + +:PREFIX UPDATE metrics_int8 SET value = 0.4 WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Update on public.metrics_int8 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Update on _timescaledb_internal._hyper_3_3_chunk metrics_int8 + Update on _timescaledb_internal._hyper_3_11_chunk metrics_int8 + Update on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 + Update on _timescaledb_internal._hyper_3_27_chunk metrics_int8 + -> Result (actual rows=1 loops=1) + Output: '0.4'::double precision, metrics_int8.tableoid, metrics_int8.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int8 (actual rows=1 loops=1) + Output: metrics_int8.tableoid, metrics_int8.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_3_19_chunk_metrics_int8_time_idx on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 (actual rows=1 loops=1) + Output: metrics_int8_1.tableoid, metrics_int8_1.ctid + Index Cond: (metrics_int8_1."time" = length("substring"(version(), 1, 20))) +(18 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int2 SET value = 0.1 * value, data = 'update' WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Update on public.metrics_int2 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Update on _timescaledb_internal._hyper_1_1_chunk metrics_int2 + Update on _timescaledb_internal._hyper_1_9_chunk metrics_int2 + Update on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 + Update on _timescaledb_internal._hyper_1_25_chunk metrics_int2 + -> Result (actual rows=1 loops=1) + Output: ('0.1'::double precision * metrics_int2.value), 'update'::text, metrics_int2.tableoid, metrics_int2.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int2 (actual rows=1 loops=1) + Output: metrics_int2.value, metrics_int2.tableoid, metrics_int2.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.value, metrics_int2_1.tableoid, metrics_int2_1.ctid + Index Cond: (metrics_int2_1."time" = length("substring"(version(), 1, 20))) +(18 rows) + +:PREFIX UPDATE metrics_int4 SET value = 0.1 * value, data = 'update' WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Update on public.metrics_int4 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Update on _timescaledb_internal._hyper_2_2_chunk metrics_int4 + Update on _timescaledb_internal._hyper_2_10_chunk metrics_int4 + Update on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 + Update on _timescaledb_internal._hyper_2_26_chunk metrics_int4 + -> Result (actual rows=1 loops=1) + Output: ('0.1'::double precision * metrics_int4.value), 'update'::text, metrics_int4.tableoid, metrics_int4.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int4 (actual rows=1 loops=1) + Output: metrics_int4.value, metrics_int4.tableoid, metrics_int4.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1.value, metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" = length("substring"(version(), 1, 20))) +(18 rows) + +:PREFIX UPDATE metrics_int8 SET value = 0.1 * value, data = 'update' WHERE time = length(substring(version(),1,20)) RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), "time" + -> Update on public.metrics_int8 (actual rows=1 loops=1) + Output: 'returning'::text, "time" + Update on _timescaledb_internal._hyper_3_3_chunk metrics_int8 + Update on _timescaledb_internal._hyper_3_11_chunk metrics_int8 + Update on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 + Update on 
_timescaledb_internal._hyper_3_27_chunk metrics_int8 + -> Result (actual rows=1 loops=1) + Output: ('0.1'::double precision * metrics_int8.value), 'update'::text, metrics_int8.tableoid, metrics_int8.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int8 (actual rows=1 loops=1) + Output: metrics_int8.value, metrics_int8.tableoid, metrics_int8.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_3_19_chunk_metrics_int8_time_idx on _timescaledb_internal._hyper_3_19_chunk metrics_int8_1 (actual rows=1 loops=1) + Output: metrics_int8_1.value, metrics_int8_1.tableoid, metrics_int8_1.ctid + Index Cond: (metrics_int8_1."time" = length("substring"(version(), 1, 20))) +(18 rows) + +ROLLBACK; +-- immutable constraints +-- should not have ChunkAppend since constraint is immutable and postgres already does the exclusion +-- should only hit 1 chunk and base table +BEGIN; +:PREFIX DELETE FROM metrics_date WHERE time = '2000-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_date (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 + -> Index Scan using _hyper_4_4_chunk_metrics_date_time_idx on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 (actual rows=1 loops=1) + Output: metrics_date_1.tableoid, metrics_date_1.ctid + Index Cond: (metrics_date_1."time" = '01-01-2000'::date) +(6 rows) + +:PREFIX DELETE FROM metrics_timestamp WHERE time = '2000-01-01'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_timestamp (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 + -> Index Scan using _hyper_5_5_chunk_metrics_timestamp_time_idx on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 (actual rows=1 loops=1) + Output: metrics_timestamp_1.tableoid, metrics_timestamp_1.ctid + Index Cond: (metrics_timestamp_1."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) +(6 rows) + +:PREFIX DELETE FROM metrics_timestamptz WHERE time = '2000-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_timestamptz (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 + -> Index Scan using _hyper_6_6_chunk_metrics_timestamptz_time_idx on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 (actual rows=1 loops=1) + Output: metrics_timestamptz_1.tableoid, metrics_timestamptz_1.ctid + Index Cond: (metrics_timestamptz_1."time" = 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) +(6 rows) + +:PREFIX DELETE FROM metrics_space WHERE time = '2000-01-01' AND device = '1'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_space (actual rows=0 loops=1) + Delete on 
_timescaledb_internal._hyper_7_7_chunk metrics_space_1 + -> Index Scan using _hyper_7_7_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=1 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: ((metrics_space_1.device = '1'::text) AND (metrics_space_1."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone)) +(6 rows) + +:PREFIX DELETE FROM metrics_space WHERE time = '2000-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_space (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 + Delete on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 + -> Append (actual rows=1 loops=1) + -> Index Scan using _hyper_7_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=0 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: (metrics_space_1."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) + -> Index Scan using _hyper_7_8_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 (actual rows=1 loops=1) + Output: metrics_space_2.tableoid, metrics_space_2.ctid + Index Cond: (metrics_space_2."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) +(11 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_date SET value = 0.6 WHERE time = '2000-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_date (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 + -> Result (actual rows=1 loops=1) + Output: '0.6'::double precision, metrics_date_1.tableoid, metrics_date_1.ctid + -> Index Scan using _hyper_4_4_chunk_metrics_date_time_idx on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 (actual rows=1 loops=1) + Output: metrics_date_1.tableoid, metrics_date_1.ctid + Index Cond: (metrics_date_1."time" = '01-01-2000'::date) +(8 rows) + +:PREFIX UPDATE metrics_timestamp SET value = 0.6 WHERE time = '2000-01-01'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_timestamp (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 + -> Result (actual rows=1 loops=1) + Output: '0.6'::double precision, metrics_timestamp_1.tableoid, metrics_timestamp_1.ctid + -> Index Scan using _hyper_5_5_chunk_metrics_timestamp_time_idx on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 (actual rows=1 loops=1) + Output: metrics_timestamp_1.tableoid, metrics_timestamp_1.ctid + Index Cond: (metrics_timestamp_1."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) +(8 rows) + +:PREFIX UPDATE metrics_timestamptz SET value = 0.6 WHERE time = '2000-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom 
Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_timestamptz (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 + -> Result (actual rows=1 loops=1) + Output: '0.6'::double precision, metrics_timestamptz_1.tableoid, metrics_timestamptz_1.ctid + -> Index Scan using _hyper_6_6_chunk_metrics_timestamptz_time_idx on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 (actual rows=1 loops=1) + Output: metrics_timestamptz_1.tableoid, metrics_timestamptz_1.ctid + Index Cond: (metrics_timestamptz_1."time" = 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) +(8 rows) + +:PREFIX UPDATE metrics_space SET value = 0.6 WHERE time = '2000-01-01' AND device = '1'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_space (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 + -> Result (actual rows=1 loops=1) + Output: '0.6'::double precision, metrics_space_1.tableoid, metrics_space_1.ctid + -> Index Scan using _hyper_7_7_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=1 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: ((metrics_space_1.device = '1'::text) AND (metrics_space_1."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone)) +(8 rows) + +:PREFIX UPDATE metrics_space SET value = 0.6 WHERE time = '2000-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_space (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 + Update on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 + -> Result (actual rows=2 loops=1) + Output: '0.6'::double precision, metrics_space.tableoid, metrics_space.ctid + -> Append (actual rows=2 loops=1) + -> Index Scan using _hyper_7_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=1 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: (metrics_space_1."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) + -> Index Scan using _hyper_7_8_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 (actual rows=1 loops=1) + Output: metrics_space_2.tableoid, metrics_space_2.ctid + Index Cond: (metrics_space_2."time" = 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) +(13 rows) + +ROLLBACK; +-- stable constraints +-- should have ChunkAppend since constraint is stable +-- should only hit 1 chunk and base table +BEGIN; +:PREFIX DELETE FROM metrics_date WHERE time = '2000-01-01'::text::date; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_date (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 + Delete on _timescaledb_internal._hyper_4_12_chunk metrics_date + Delete on _timescaledb_internal._hyper_4_20_chunk metrics_date + 
Delete on _timescaledb_internal._hyper_4_28_chunk metrics_date + -> Custom Scan (ChunkAppend) on public.metrics_date (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_4_4_chunk_metrics_date_time_idx on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 (actual rows=1 loops=1) + Output: metrics_date_1.tableoid, metrics_date_1.ctid + Index Cond: (metrics_date_1."time" = ('2000-01-01'::cstring)::date) +(13 rows) + +:PREFIX DELETE FROM metrics_timestamp WHERE time = '2000-01-01'::text::timestamp; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_timestamp (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 + Delete on _timescaledb_internal._hyper_5_13_chunk metrics_timestamp + Delete on _timescaledb_internal._hyper_5_21_chunk metrics_timestamp + Delete on _timescaledb_internal._hyper_5_29_chunk metrics_timestamp + -> Custom Scan (ChunkAppend) on public.metrics_timestamp (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_5_5_chunk_metrics_timestamp_time_idx on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 (actual rows=1 loops=1) + Output: metrics_timestamp_1.tableoid, metrics_timestamp_1.ctid + Index Cond: (metrics_timestamp_1."time" = ('2000-01-01'::cstring)::timestamp without time zone) +(13 rows) + +:PREFIX DELETE FROM metrics_timestamptz WHERE time = '2000-01-01'::text::timestamptz; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_timestamptz (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 + Delete on _timescaledb_internal._hyper_6_14_chunk metrics_timestamptz + Delete on _timescaledb_internal._hyper_6_22_chunk metrics_timestamptz + Delete on _timescaledb_internal._hyper_6_30_chunk metrics_timestamptz + -> Custom Scan (ChunkAppend) on public.metrics_timestamptz (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_6_6_chunk_metrics_timestamptz_time_idx on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 (actual rows=1 loops=1) + Output: metrics_timestamptz_1.tableoid, metrics_timestamptz_1.ctid + Index Cond: (metrics_timestamptz_1."time" = ('2000-01-01'::cstring)::timestamp with time zone) +(13 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_date SET value = 0.9 WHERE time = '2000-01-01'::text::date; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_date (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 + Update on _timescaledb_internal._hyper_4_12_chunk metrics_date + Update on _timescaledb_internal._hyper_4_20_chunk metrics_date + Update on _timescaledb_internal._hyper_4_28_chunk metrics_date + -> Result (actual rows=1 
loops=1) + Output: '0.9'::double precision, metrics_date.tableoid, metrics_date.ctid + -> Custom Scan (ChunkAppend) on public.metrics_date (actual rows=1 loops=1) + Output: metrics_date.tableoid, metrics_date.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_4_4_chunk_metrics_date_time_idx on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 (actual rows=1 loops=1) + Output: metrics_date_1.tableoid, metrics_date_1.ctid + Index Cond: (metrics_date_1."time" = ('2000-01-01'::cstring)::date) +(16 rows) + +:PREFIX UPDATE metrics_timestamp SET value = 0.9 WHERE time = '2000-01-01'::text::timestamp; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_timestamp (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 + Update on _timescaledb_internal._hyper_5_13_chunk metrics_timestamp + Update on _timescaledb_internal._hyper_5_21_chunk metrics_timestamp + Update on _timescaledb_internal._hyper_5_29_chunk metrics_timestamp + -> Result (actual rows=1 loops=1) + Output: '0.9'::double precision, metrics_timestamp.tableoid, metrics_timestamp.ctid + -> Custom Scan (ChunkAppend) on public.metrics_timestamp (actual rows=1 loops=1) + Output: metrics_timestamp.tableoid, metrics_timestamp.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_5_5_chunk_metrics_timestamp_time_idx on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 (actual rows=1 loops=1) + Output: metrics_timestamp_1.tableoid, metrics_timestamp_1.ctid + Index Cond: (metrics_timestamp_1."time" = ('2000-01-01'::cstring)::timestamp without time zone) +(16 rows) + +:PREFIX UPDATE metrics_timestamptz SET value = 0.9 WHERE time = '2000-01-01'::text::timestamptz; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_timestamptz (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 + Update on _timescaledb_internal._hyper_6_14_chunk metrics_timestamptz + Update on _timescaledb_internal._hyper_6_22_chunk metrics_timestamptz + Update on _timescaledb_internal._hyper_6_30_chunk metrics_timestamptz + -> Result (actual rows=1 loops=1) + Output: '0.9'::double precision, metrics_timestamptz.tableoid, metrics_timestamptz.ctid + -> Custom Scan (ChunkAppend) on public.metrics_timestamptz (actual rows=1 loops=1) + Output: metrics_timestamptz.tableoid, metrics_timestamptz.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_6_6_chunk_metrics_timestamptz_time_idx on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 (actual rows=1 loops=1) + Output: metrics_timestamptz_1.tableoid, metrics_timestamptz_1.ctid + Index Cond: (metrics_timestamptz_1."time" = ('2000-01-01'::cstring)::timestamp with time zone) +(16 rows) + +ROLLBACK; +-- space partitioning +-- should have ChunkAppend since constraint is stable +-- should only hit 1 chunk and base table +BEGIN; +:PREFIX DELETE FROM metrics_space WHERE time = 
'2000-01-01'::text::timestamptz AND device = format('1'); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_space (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 + Delete on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 + Delete on _timescaledb_internal._hyper_7_15_chunk metrics_space + Delete on _timescaledb_internal._hyper_7_16_chunk metrics_space + Delete on _timescaledb_internal._hyper_7_23_chunk metrics_space + Delete on _timescaledb_internal._hyper_7_24_chunk metrics_space + Delete on _timescaledb_internal._hyper_7_31_chunk metrics_space + Delete on _timescaledb_internal._hyper_7_32_chunk metrics_space + -> Custom Scan (ChunkAppend) on public.metrics_space (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 6 + -> Index Scan using _hyper_7_7_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=1 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: ((metrics_space_1.device = format('1'::text)) AND (metrics_space_1."time" = ('2000-01-01'::cstring)::timestamp with time zone)) + -> Index Scan using _hyper_7_8_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 (actual rows=0 loops=1) + Output: metrics_space_2.tableoid, metrics_space_2.ctid + Index Cond: ((metrics_space_2.device = format('1'::text)) AND (metrics_space_2."time" = ('2000-01-01'::cstring)::timestamp with time zone)) +(20 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM metrics_space WHERE device = format('1'); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_space (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 + Delete on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 + Delete on _timescaledb_internal._hyper_7_15_chunk metrics_space_3 + Delete on _timescaledb_internal._hyper_7_16_chunk metrics_space_4 + Delete on _timescaledb_internal._hyper_7_23_chunk metrics_space_5 + Delete on _timescaledb_internal._hyper_7_24_chunk metrics_space_6 + Delete on _timescaledb_internal._hyper_7_31_chunk metrics_space_7 + Delete on _timescaledb_internal._hyper_7_32_chunk metrics_space_8 + -> Custom Scan (ChunkAppend) on public.metrics_space (actual rows=4 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_7_7_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=1 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: (metrics_space_1.device = format('1'::text)) + -> Index Scan using _hyper_7_8_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 (actual rows=0 loops=1) + Output: metrics_space_2.tableoid, metrics_space_2.ctid + Index Cond: (metrics_space_2.device = format('1'::text)) + -> Index Scan using _hyper_7_15_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_15_chunk metrics_space_3 (actual rows=1 loops=1) + Output: 
metrics_space_3.tableoid, metrics_space_3.ctid + Index Cond: (metrics_space_3.device = format('1'::text)) + -> Index Scan using _hyper_7_16_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_16_chunk metrics_space_4 (actual rows=0 loops=1) + Output: metrics_space_4.tableoid, metrics_space_4.ctid + Index Cond: (metrics_space_4.device = format('1'::text)) + -> Index Scan using _hyper_7_23_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_23_chunk metrics_space_5 (actual rows=1 loops=1) + Output: metrics_space_5.tableoid, metrics_space_5.ctid + Index Cond: (metrics_space_5.device = format('1'::text)) + -> Index Scan using _hyper_7_24_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_24_chunk metrics_space_6 (actual rows=0 loops=1) + Output: metrics_space_6.tableoid, metrics_space_6.ctid + Index Cond: (metrics_space_6.device = format('1'::text)) + -> Index Scan using _hyper_7_31_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_31_chunk metrics_space_7 (actual rows=1 loops=1) + Output: metrics_space_7.tableoid, metrics_space_7.ctid + Index Cond: (metrics_space_7.device = format('1'::text)) + -> Index Scan using _hyper_7_32_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_32_chunk metrics_space_8 (actual rows=0 loops=1) + Output: metrics_space_8.tableoid, metrics_space_8.ctid + Index Cond: (metrics_space_8.device = format('1'::text)) +(38 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_space SET value = 0.1 WHERE time = '2000-01-01'::text::timestamptz AND device = format('1'); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_space (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 + Update on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 + Update on _timescaledb_internal._hyper_7_15_chunk metrics_space + Update on _timescaledb_internal._hyper_7_16_chunk metrics_space + Update on _timescaledb_internal._hyper_7_23_chunk metrics_space + Update on _timescaledb_internal._hyper_7_24_chunk metrics_space + Update on _timescaledb_internal._hyper_7_31_chunk metrics_space + Update on _timescaledb_internal._hyper_7_32_chunk metrics_space + -> Result (actual rows=1 loops=1) + Output: '0.1'::double precision, metrics_space.tableoid, metrics_space.ctid + -> Custom Scan (ChunkAppend) on public.metrics_space (actual rows=1 loops=1) + Output: metrics_space.tableoid, metrics_space.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 6 + -> Index Scan using _hyper_7_7_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=1 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: ((metrics_space_1.device = format('1'::text)) AND (metrics_space_1."time" = ('2000-01-01'::cstring)::timestamp with time zone)) + -> Index Scan using _hyper_7_8_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 (actual rows=0 loops=1) + Output: metrics_space_2.tableoid, metrics_space_2.ctid + Index Cond: ((metrics_space_2.device = format('1'::text)) AND (metrics_space_2."time" = ('2000-01-01'::cstring)::timestamp with time zone)) +(23 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_space SET value = 0.1 WHERE 
device = format('1'); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_space (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 + Update on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 + Update on _timescaledb_internal._hyper_7_15_chunk metrics_space_3 + Update on _timescaledb_internal._hyper_7_16_chunk metrics_space_4 + Update on _timescaledb_internal._hyper_7_23_chunk metrics_space_5 + Update on _timescaledb_internal._hyper_7_24_chunk metrics_space_6 + Update on _timescaledb_internal._hyper_7_31_chunk metrics_space_7 + Update on _timescaledb_internal._hyper_7_32_chunk metrics_space_8 + -> Result (actual rows=4 loops=1) + Output: '0.1'::double precision, metrics_space.tableoid, metrics_space.ctid + -> Custom Scan (ChunkAppend) on public.metrics_space (actual rows=4 loops=1) + Output: metrics_space.tableoid, metrics_space.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_7_7_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_7_chunk metrics_space_1 (actual rows=1 loops=1) + Output: metrics_space_1.tableoid, metrics_space_1.ctid + Index Cond: (metrics_space_1.device = format('1'::text)) + -> Index Scan using _hyper_7_8_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_8_chunk metrics_space_2 (actual rows=0 loops=1) + Output: metrics_space_2.tableoid, metrics_space_2.ctid + Index Cond: (metrics_space_2.device = format('1'::text)) + -> Index Scan using _hyper_7_15_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_15_chunk metrics_space_3 (actual rows=1 loops=1) + Output: metrics_space_3.tableoid, metrics_space_3.ctid + Index Cond: (metrics_space_3.device = format('1'::text)) + -> Index Scan using _hyper_7_16_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_16_chunk metrics_space_4 (actual rows=0 loops=1) + Output: metrics_space_4.tableoid, metrics_space_4.ctid + Index Cond: (metrics_space_4.device = format('1'::text)) + -> Index Scan using _hyper_7_23_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_23_chunk metrics_space_5 (actual rows=1 loops=1) + Output: metrics_space_5.tableoid, metrics_space_5.ctid + Index Cond: (metrics_space_5.device = format('1'::text)) + -> Index Scan using _hyper_7_24_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_24_chunk metrics_space_6 (actual rows=0 loops=1) + Output: metrics_space_6.tableoid, metrics_space_6.ctid + Index Cond: (metrics_space_6.device = format('1'::text)) + -> Index Scan using _hyper_7_31_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_31_chunk metrics_space_7 (actual rows=1 loops=1) + Output: metrics_space_7.tableoid, metrics_space_7.ctid + Index Cond: (metrics_space_7.device = format('1'::text)) + -> Index Scan using _hyper_7_32_chunk_metrics_space_device_time_idx on _timescaledb_internal._hyper_7_32_chunk metrics_space_8 (actual rows=0 loops=1) + Output: metrics_space_8.tableoid, metrics_space_8.ctid + Index Cond: (metrics_space_8.device = format('1'::text)) +(41 rows) + +ROLLBACK; +-- should have ChunkAppend since constraint is stable +-- should only hit 1 chunk and base table, toplevel rows should be 1 +BEGIN; +:PREFIX DELETE FROM metrics_date WHERE time 
= '2000-01-01'::text::date RETURNING 'returning', time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), metrics_date_1."time" + -> Delete on public.metrics_date (actual rows=1 loops=1) + Output: 'returning'::text, metrics_date_1."time" + Delete on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 + Delete on _timescaledb_internal._hyper_4_12_chunk metrics_date + Delete on _timescaledb_internal._hyper_4_20_chunk metrics_date + Delete on _timescaledb_internal._hyper_4_28_chunk metrics_date + -> Custom Scan (ChunkAppend) on public.metrics_date (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_4_4_chunk_metrics_date_time_idx on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 (actual rows=1 loops=1) + Output: metrics_date_1.tableoid, metrics_date_1.ctid + Index Cond: (metrics_date_1."time" = ('2000-01-01'::cstring)::date) +(15 rows) + +:PREFIX DELETE FROM metrics_timestamp WHERE time = '2000-01-01'::text::timestamp RETURNING 'returning', time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), metrics_timestamp_1."time" + -> Delete on public.metrics_timestamp (actual rows=1 loops=1) + Output: 'returning'::text, metrics_timestamp_1."time" + Delete on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 + Delete on _timescaledb_internal._hyper_5_13_chunk metrics_timestamp + Delete on _timescaledb_internal._hyper_5_21_chunk metrics_timestamp + Delete on _timescaledb_internal._hyper_5_29_chunk metrics_timestamp + -> Custom Scan (ChunkAppend) on public.metrics_timestamp (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_5_5_chunk_metrics_timestamp_time_idx on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 (actual rows=1 loops=1) + Output: metrics_timestamp_1.tableoid, metrics_timestamp_1.ctid + Index Cond: (metrics_timestamp_1."time" = ('2000-01-01'::cstring)::timestamp without time zone) +(15 rows) + +:PREFIX DELETE FROM metrics_timestamptz WHERE time = '2000-01-01'::text::timestamptz RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), metrics_timestamptz_1."time" + -> Delete on public.metrics_timestamptz (actual rows=1 loops=1) + Output: 'returning'::text, metrics_timestamptz_1."time" + Delete on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 + Delete on _timescaledb_internal._hyper_6_14_chunk metrics_timestamptz + Delete on _timescaledb_internal._hyper_6_22_chunk metrics_timestamptz + Delete on _timescaledb_internal._hyper_6_30_chunk metrics_timestamptz + -> Custom Scan (ChunkAppend) on public.metrics_timestamptz (actual rows=1 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_6_6_chunk_metrics_timestamptz_time_idx on 
_timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 (actual rows=1 loops=1) + Output: metrics_timestamptz_1.tableoid, metrics_timestamptz_1.ctid + Index Cond: (metrics_timestamptz_1."time" = ('2000-01-01'::cstring)::timestamp with time zone) +(15 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_date SET value = 0.2 WHERE time = '2000-01-01'::text::date RETURNING 'returning', time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), metrics_date_1."time" + -> Update on public.metrics_date (actual rows=1 loops=1) + Output: 'returning'::text, metrics_date_1."time" + Update on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 + Update on _timescaledb_internal._hyper_4_12_chunk metrics_date + Update on _timescaledb_internal._hyper_4_20_chunk metrics_date + Update on _timescaledb_internal._hyper_4_28_chunk metrics_date + -> Result (actual rows=1 loops=1) + Output: '0.2'::double precision, metrics_date.tableoid, metrics_date.ctid + -> Custom Scan (ChunkAppend) on public.metrics_date (actual rows=1 loops=1) + Output: metrics_date.tableoid, metrics_date.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_4_4_chunk_metrics_date_time_idx on _timescaledb_internal._hyper_4_4_chunk metrics_date_1 (actual rows=1 loops=1) + Output: metrics_date_1.tableoid, metrics_date_1.ctid + Index Cond: (metrics_date_1."time" = ('2000-01-01'::cstring)::date) +(18 rows) + +:PREFIX UPDATE metrics_timestamp SET value = 0.2 WHERE time = '2000-01-01'::text::timestamp RETURNING 'returning', time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: ('returning'::text), metrics_timestamp_1."time" + -> Update on public.metrics_timestamp (actual rows=1 loops=1) + Output: 'returning'::text, metrics_timestamp_1."time" + Update on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 + Update on _timescaledb_internal._hyper_5_13_chunk metrics_timestamp + Update on _timescaledb_internal._hyper_5_21_chunk metrics_timestamp + Update on _timescaledb_internal._hyper_5_29_chunk metrics_timestamp + -> Result (actual rows=1 loops=1) + Output: '0.2'::double precision, metrics_timestamp.tableoid, metrics_timestamp.ctid + -> Custom Scan (ChunkAppend) on public.metrics_timestamp (actual rows=1 loops=1) + Output: metrics_timestamp.tableoid, metrics_timestamp.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_5_5_chunk_metrics_timestamp_time_idx on _timescaledb_internal._hyper_5_5_chunk metrics_timestamp_1 (actual rows=1 loops=1) + Output: metrics_timestamp_1.tableoid, metrics_timestamp_1.ctid + Index Cond: (metrics_timestamp_1."time" = ('2000-01-01'::cstring)::timestamp without time zone) +(18 rows) + +:PREFIX UPDATE metrics_timestamptz SET value = 0.2 WHERE time = '2000-01-01'::text::timestamptz RETURNING 'returning', time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=1 loops=1) + Output: 
('returning'::text), metrics_timestamptz_1."time" + -> Update on public.metrics_timestamptz (actual rows=1 loops=1) + Output: 'returning'::text, metrics_timestamptz_1."time" + Update on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 + Update on _timescaledb_internal._hyper_6_14_chunk metrics_timestamptz + Update on _timescaledb_internal._hyper_6_22_chunk metrics_timestamptz + Update on _timescaledb_internal._hyper_6_30_chunk metrics_timestamptz + -> Result (actual rows=1 loops=1) + Output: '0.2'::double precision, metrics_timestamptz.tableoid, metrics_timestamptz.ctid + -> Custom Scan (ChunkAppend) on public.metrics_timestamptz (actual rows=1 loops=1) + Output: metrics_timestamptz.tableoid, metrics_timestamptz.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 3 + -> Index Scan using _hyper_6_6_chunk_metrics_timestamptz_time_idx on _timescaledb_internal._hyper_6_6_chunk metrics_timestamptz_1 (actual rows=1 loops=1) + Output: metrics_timestamptz_1.tableoid, metrics_timestamptz_1.ctid + Index Cond: (metrics_timestamptz_1."time" = ('2000-01-01'::cstring)::timestamp with time zone) +(18 rows) + +ROLLBACK; +-- subselects +-- no chunk exclusion for subqueries joins atm +BEGIN; +:PREFIX DELETE FROM metrics_int4 WHERE time IN (SELECT time FROM metrics_int2) AND time < length(version()); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 + -> Hash Join (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int4.tableoid, metrics_int4.ctid, metrics_int2.tableoid + Inner Unique: true + Hash Cond: (metrics_int4."time" = metrics_int2."time") + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1."time", metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" < length(version())) + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 (actual rows=1 loops=1) + Output: metrics_int4_2."time", metrics_int4_2.tableoid, metrics_int4_2.ctid + Index Cond: (metrics_int4_2."time" < length(version())) + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 (actual rows=1 loops=1) + Output: metrics_int4_3."time", metrics_int4_3.tableoid, metrics_int4_3.ctid + Index Cond: (metrics_int4_3."time" < length(version())) + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 (actual rows=1 loops=1) + Output: metrics_int4_4."time", metrics_int4_4.tableoid, metrics_int4_4.ctid + Index Cond: (metrics_int4_4."time" < length(version())) + -> Hash (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Buckets: 1024 Batches: 1 + -> HashAggregate (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", 
metrics_int2.tableoid + Group Key: metrics_int2."time" + Batches: 1 + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.ctid, metrics_int2_1."time", metrics_int2_1.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk metrics_int2_2 (actual rows=1 loops=1) + Output: metrics_int2_2.ctid, metrics_int2_2."time", metrics_int2_2.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_17_chunk metrics_int2_3 (actual rows=1 loops=1) + Output: metrics_int2_3.ctid, metrics_int2_3."time", metrics_int2_3.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_25_chunk metrics_int2_4 (actual rows=1 loops=1) + Output: metrics_int2_4.ctid, metrics_int2_4."time", metrics_int2_4.tableoid +(39 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM metrics_int4 WHERE time IN (SELECT time FROM metrics_int2 WHERE time < length(version())); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 + -> Hash Join (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int4.tableoid, metrics_int4.ctid, metrics_int2.tableoid + Inner Unique: true + Hash Cond: (metrics_int4."time" = metrics_int2."time") + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1."time", metrics_int4_1.tableoid, metrics_int4_1.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 (actual rows=1 loops=1) + Output: metrics_int4_2."time", metrics_int4_2.tableoid, metrics_int4_2.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 (actual rows=1 loops=1) + Output: metrics_int4_3."time", metrics_int4_3.tableoid, metrics_int4_3.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 (actual rows=1 loops=1) + Output: metrics_int4_4."time", metrics_int4_4.tableoid, metrics_int4_4.ctid + -> Hash (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Buckets: 1024 Batches: 1 + -> HashAggregate (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Group Key: metrics_int2."time" + Batches: 1 + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.ctid, metrics_int2_1."time", metrics_int2_1.tableoid + Index Cond: (metrics_int2_1."time" < length(version())) + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk metrics_int2_2 (actual rows=1 loops=1) + Output: metrics_int2_2.ctid, metrics_int2_2."time", metrics_int2_2.tableoid + Index Cond: (metrics_int2_2."time" < length(version())) + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_3 (actual rows=1 loops=1) + Output: metrics_int2_3.ctid, 
metrics_int2_3."time", metrics_int2_3.tableoid + Index Cond: (metrics_int2_3."time" < length(version())) + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk metrics_int2_4 (actual rows=1 loops=1) + Output: metrics_int2_4.ctid, metrics_int2_4."time", metrics_int2_4.tableoid + Index Cond: (metrics_int2_4."time" < length(version())) +(39 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM metrics_int4 WHERE time IN (SELECT time FROM metrics_int2 WHERE time < length(version())) AND time < length(version()); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 + -> Hash Join (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int4.tableoid, metrics_int4.ctid, metrics_int2.tableoid + Inner Unique: true + Hash Cond: (metrics_int4."time" = metrics_int2."time") + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1."time", metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" < length(version())) + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 (actual rows=1 loops=1) + Output: metrics_int4_2."time", metrics_int4_2.tableoid, metrics_int4_2.ctid + Index Cond: (metrics_int4_2."time" < length(version())) + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 (actual rows=1 loops=1) + Output: metrics_int4_3."time", metrics_int4_3.tableoid, metrics_int4_3.ctid + Index Cond: (metrics_int4_3."time" < length(version())) + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 (actual rows=1 loops=1) + Output: metrics_int4_4."time", metrics_int4_4.tableoid, metrics_int4_4.ctid + Index Cond: (metrics_int4_4."time" < length(version())) + -> Hash (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Buckets: 1024 Batches: 1 + -> HashAggregate (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Group Key: metrics_int2."time" + Batches: 1 + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.ctid, metrics_int2_1."time", metrics_int2_1.tableoid + Index Cond: (metrics_int2_1."time" < length(version())) + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk metrics_int2_2 (actual rows=1 loops=1) + Output: metrics_int2_2.ctid, metrics_int2_2."time", metrics_int2_2.tableoid + Index Cond: (metrics_int2_2."time" < length(version())) + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on 
_timescaledb_internal._hyper_1_17_chunk metrics_int2_3 (actual rows=1 loops=1) + Output: metrics_int2_3.ctid, metrics_int2_3."time", metrics_int2_3.tableoid + Index Cond: (metrics_int2_3."time" < length(version())) + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk metrics_int2_4 (actual rows=1 loops=1) + Output: metrics_int2_4.ctid, metrics_int2_4."time", metrics_int2_4.tableoid + Index Cond: (metrics_int2_4."time" < length(version())) +(43 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int4 SET value = 0.1 WHERE time IN (SELECT time FROM metrics_int2) AND time < length(version()); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 + Update on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 + Update on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 + Update on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 + -> Hash Join (actual rows=4 loops=1) + Output: '0.1'::double precision, metrics_int2.ctid, metrics_int4.tableoid, metrics_int4.ctid, metrics_int2.tableoid + Inner Unique: true + Hash Cond: (metrics_int4."time" = metrics_int2."time") + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1."time", metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" < length(version())) + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 (actual rows=1 loops=1) + Output: metrics_int4_2."time", metrics_int4_2.tableoid, metrics_int4_2.ctid + Index Cond: (metrics_int4_2."time" < length(version())) + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 (actual rows=1 loops=1) + Output: metrics_int4_3."time", metrics_int4_3.tableoid, metrics_int4_3.ctid + Index Cond: (metrics_int4_3."time" < length(version())) + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 (actual rows=1 loops=1) + Output: metrics_int4_4."time", metrics_int4_4.tableoid, metrics_int4_4.ctid + Index Cond: (metrics_int4_4."time" < length(version())) + -> Hash (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Buckets: 1024 Batches: 1 + -> HashAggregate (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Group Key: metrics_int2."time" + Batches: 1 + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.ctid, metrics_int2_1."time", metrics_int2_1.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk metrics_int2_2 (actual rows=1 loops=1) + Output: metrics_int2_2.ctid, metrics_int2_2."time", metrics_int2_2.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_17_chunk metrics_int2_3 (actual rows=1 loops=1) + Output: metrics_int2_3.ctid, metrics_int2_3."time", metrics_int2_3.tableoid + -> Seq Scan on 
_timescaledb_internal._hyper_1_25_chunk metrics_int2_4 (actual rows=1 loops=1) + Output: metrics_int2_4.ctid, metrics_int2_4."time", metrics_int2_4.tableoid +(39 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int4 SET value = 0.1 WHERE time IN (SELECT time FROM metrics_int2 WHERE time < length(version())); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 + Update on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 + Update on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 + Update on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 + -> Hash Join (actual rows=4 loops=1) + Output: '0.1'::double precision, metrics_int2.ctid, metrics_int4.tableoid, metrics_int4.ctid, metrics_int2.tableoid + Inner Unique: true + Hash Cond: (metrics_int4."time" = metrics_int2."time") + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1."time", metrics_int4_1.tableoid, metrics_int4_1.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 (actual rows=1 loops=1) + Output: metrics_int4_2."time", metrics_int4_2.tableoid, metrics_int4_2.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 (actual rows=1 loops=1) + Output: metrics_int4_3."time", metrics_int4_3.tableoid, metrics_int4_3.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 (actual rows=1 loops=1) + Output: metrics_int4_4."time", metrics_int4_4.tableoid, metrics_int4_4.ctid + -> Hash (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Buckets: 1024 Batches: 1 + -> HashAggregate (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Group Key: metrics_int2."time" + Batches: 1 + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.ctid, metrics_int2_1."time", metrics_int2_1.tableoid + Index Cond: (metrics_int2_1."time" < length(version())) + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk metrics_int2_2 (actual rows=1 loops=1) + Output: metrics_int2_2.ctid, metrics_int2_2."time", metrics_int2_2.tableoid + Index Cond: (metrics_int2_2."time" < length(version())) + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_3 (actual rows=1 loops=1) + Output: metrics_int2_3.ctid, metrics_int2_3."time", metrics_int2_3.tableoid + Index Cond: (metrics_int2_3."time" < length(version())) + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk metrics_int2_4 (actual rows=1 loops=1) + Output: metrics_int2_4.ctid, metrics_int2_4."time", metrics_int2_4.tableoid + Index Cond: (metrics_int2_4."time" < length(version())) +(39 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int4 SET value = 0.1 WHERE time IN (SELECT time FROM metrics_int2 WHERE time < length(version())) AND time < length(version()); + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 + Update on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 + Update on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 + Update on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 + -> Hash Join (actual rows=4 loops=1) + Output: '0.1'::double precision, metrics_int2.ctid, metrics_int4.tableoid, metrics_int4.ctid, metrics_int2.tableoid + Inner Unique: true + Hash Cond: (metrics_int4."time" = metrics_int2."time") + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk metrics_int4_1 (actual rows=1 loops=1) + Output: metrics_int4_1."time", metrics_int4_1.tableoid, metrics_int4_1.ctid + Index Cond: (metrics_int4_1."time" < length(version())) + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk metrics_int4_2 (actual rows=1 loops=1) + Output: metrics_int4_2."time", metrics_int4_2.tableoid, metrics_int4_2.ctid + Index Cond: (metrics_int4_2."time" < length(version())) + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk metrics_int4_3 (actual rows=1 loops=1) + Output: metrics_int4_3."time", metrics_int4_3.tableoid, metrics_int4_3.ctid + Index Cond: (metrics_int4_3."time" < length(version())) + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk metrics_int4_4 (actual rows=1 loops=1) + Output: metrics_int4_4."time", metrics_int4_4.tableoid, metrics_int4_4.ctid + Index Cond: (metrics_int4_4."time" < length(version())) + -> Hash (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Buckets: 1024 Batches: 1 + -> HashAggregate (actual rows=4 loops=1) + Output: metrics_int2.ctid, metrics_int2."time", metrics_int2.tableoid + Group Key: metrics_int2."time" + Batches: 1 + -> Append (actual rows=4 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk metrics_int2_1 (actual rows=1 loops=1) + Output: metrics_int2_1.ctid, metrics_int2_1."time", metrics_int2_1.tableoid + Index Cond: (metrics_int2_1."time" < length(version())) + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk metrics_int2_2 (actual rows=1 loops=1) + Output: metrics_int2_2.ctid, metrics_int2_2."time", metrics_int2_2.tableoid + Index Cond: (metrics_int2_2."time" < length(version())) + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk metrics_int2_3 (actual rows=1 loops=1) + Output: metrics_int2_3.ctid, metrics_int2_3."time", metrics_int2_3.tableoid + Index Cond: (metrics_int2_3."time" < length(version())) + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk metrics_int2_4 (actual rows=1 loops=1) + Output: metrics_int2_4.ctid, metrics_int2_4."time", metrics_int2_4.tableoid + Index Cond: (metrics_int2_4."time" < length(version())) +(43 rows) + +ROLLBACK; +-- join +-- no chunk exclusion for subqueries joins 
atm +BEGIN; +:PREFIX DELETE FROM metrics_int4 m4 USING metrics_int2 m2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 m4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk m4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk m4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk m4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Nested Loop (actual rows=16 loops=1) + Output: m2.ctid, m4.tableoid, m4.ctid, m2.tableoid + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.ctid, m2_1.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.ctid, m2_2.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.ctid, m2_3.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.ctid, m2_4.tableoid + -> Materialize (actual rows=4 loops=4) + Output: m4.tableoid, m4.ctid + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, m4_4.ctid +(28 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM metrics_int4 m4 USING metrics_int2 m2 WHERE m4.time = m2.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 m4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk m4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk m4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk m4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Merge Join (actual rows=4 loops=1) + Output: m2.ctid, m4.tableoid, m4.ctid, m2.tableoid + Merge Cond: (m4."time" = m2."time") + -> Merge Append (actual rows=4 loops=1) + Sort Key: m4."time" + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1."time", m4_1.tableoid, m4_1.ctid + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2."time", m4_2.tableoid, m4_2.ctid + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3."time", m4_3.tableoid, m4_3.ctid + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4."time", m4_4.tableoid, m4_4.ctid + -> Materialize (actual rows=4 loops=1) + Output: m2.ctid, m2."time", m2.tableoid + -> Merge Append (actual rows=4 loops=1) + Sort Key: m2."time" + -> Index Scan Backward using 
_hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.ctid, m2_1."time", m2_1.tableoid + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.ctid, m2_2."time", m2_2.tableoid + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.ctid, m2_3."time", m2_3.tableoid + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.ctid, m2_4."time", m2_4.tableoid +(31 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM metrics_int4 m4 USING metrics_int2 m2 WHERE m4.time = m2.time AND m4.time < length(version()); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 m4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk m4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk m4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk m4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Merge Join (actual rows=4 loops=1) + Output: m2.ctid, m4.tableoid, m4.ctid, m2.tableoid + Merge Cond: (m4."time" = m2."time") + -> Merge Append (actual rows=4 loops=1) + Sort Key: m4."time" + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1."time", m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2."time", m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3."time", m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4."time", m4_4.tableoid, m4_4.ctid + Index Cond: (m4_4."time" < length(version())) + -> Materialize (actual rows=4 loops=1) + Output: m2.ctid, m2."time", m2.tableoid + -> Merge Append (actual rows=4 loops=1) + Sort Key: m2."time" + -> Index Scan Backward using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.ctid, m2_1."time", m2_1.tableoid + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.ctid, m2_2."time", m2_2.tableoid + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.ctid, m2_3."time", m2_3.tableoid + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.ctid, m2_4."time", m2_4.tableoid +(35 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int4 m4 SET value = 0.15 FROM 
metrics_int2 m2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 m4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk m4_1 + Update on _timescaledb_internal._hyper_2_10_chunk m4_2 + Update on _timescaledb_internal._hyper_2_18_chunk m4_3 + Update on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Nested Loop (actual rows=16 loops=1) + Output: '0.15'::double precision, m2.ctid, m4.tableoid, m4.ctid, m2.tableoid + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.ctid, m2_1.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.ctid, m2_2.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.ctid, m2_3.tableoid + -> Seq Scan on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.ctid, m2_4.tableoid + -> Materialize (actual rows=4 loops=4) + Output: m4.tableoid, m4.ctid + -> Append (actual rows=4 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + -> Seq Scan on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, m4_4.ctid +(28 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int4 m4 SET value = 0.15 FROM metrics_int2 m2 WHERE m4.time = m2.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 m4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk m4_1 + Update on _timescaledb_internal._hyper_2_10_chunk m4_2 + Update on _timescaledb_internal._hyper_2_18_chunk m4_3 + Update on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Merge Join (actual rows=4 loops=1) + Output: '0.15'::double precision, m2.ctid, m4.tableoid, m4.ctid, m2.tableoid + Merge Cond: (m4."time" = m2."time") + -> Merge Append (actual rows=4 loops=1) + Sort Key: m4."time" + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1."time", m4_1.tableoid, m4_1.ctid + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2."time", m4_2.tableoid, m4_2.ctid + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3."time", m4_3.tableoid, m4_3.ctid + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4."time", m4_4.tableoid, m4_4.ctid + -> Materialize (actual rows=4 loops=1) + Output: m2.ctid, m2."time", m2.tableoid + -> Merge Append (actual rows=4 loops=1) + Sort Key: m2."time" + -> Index Scan Backward using 
_hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.ctid, m2_1."time", m2_1.tableoid + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.ctid, m2_2."time", m2_2.tableoid + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.ctid, m2_3."time", m2_3.tableoid + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.ctid, m2_4."time", m2_4.tableoid +(31 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE metrics_int4 m4 SET value = 0.15 FROM metrics_int2 m2 WHERE m4.time = m2.time AND m4.time < length(version()); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 m4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk m4_1 + Update on _timescaledb_internal._hyper_2_10_chunk m4_2 + Update on _timescaledb_internal._hyper_2_18_chunk m4_3 + Update on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Merge Join (actual rows=4 loops=1) + Output: '0.15'::double precision, m2.ctid, m4.tableoid, m4.ctid, m2.tableoid + Merge Cond: (m4."time" = m2."time") + -> Merge Append (actual rows=4 loops=1) + Sort Key: m4."time" + -> Index Scan Backward using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1."time", m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan Backward using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2."time", m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan Backward using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3."time", m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan Backward using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4."time", m4_4.tableoid, m4_4.ctid + Index Cond: (m4_4."time" < length(version())) + -> Materialize (actual rows=4 loops=1) + Output: m2.ctid, m2."time", m2.tableoid + -> Merge Append (actual rows=4 loops=1) + Sort Key: m2."time" + -> Index Scan Backward using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.ctid, m2_1."time", m2_1.tableoid + -> Index Scan Backward using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.ctid, m2_2."time", m2_2.tableoid + -> Index Scan Backward using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.ctid, m2_3."time", m2_3.tableoid + -> Index Scan Backward using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.ctid, m2_4."time", m2_4.tableoid +(35 rows) + +ROLLBACK; +-- cte +-- should all have 
chunkappend nodes +BEGIN; +:PREFIX WITH d AS (DELETE FROM metrics_int4 m4 WHERE m4.time < length(version()) RETURNING time) +SELECT * FROM d; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------ + CTE Scan on d (actual rows=4 loops=1) + Output: d."time" + CTE d + -> Custom Scan (HypertableModify) (actual rows=4 loops=1) + Output: m4_1."time" + -> Delete on public.metrics_int4 m4 (actual rows=4 loops=1) + Output: m4_1."time" + Delete on _timescaledb_internal._hyper_2_2_chunk m4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk m4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk m4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Custom Scan (ChunkAppend) on public.metrics_int4 m4 (actual rows=4 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, m4_4.ctid + Index Cond: (m4_4."time" < length(version())) +(27 rows) + +ROLLBACK; +BEGIN; +:PREFIX WITH d AS (DELETE FROM metrics_int4 m4 WHERE m4.time < length(version())) +DELETE FROM metrics_int2 m2 WHERE m2.time < length(version()); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + CTE d + -> Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_int4 m4 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_2_2_chunk m4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk m4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk m4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Custom Scan (ChunkAppend) on public.metrics_int4 m4 (actual rows=4 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, 
m4_4.ctid + Index Cond: (m4_4."time" < length(version())) + -> Delete on public.metrics_int2 m2 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_1_1_chunk m2_1 + Delete on _timescaledb_internal._hyper_1_9_chunk m2_2 + Delete on _timescaledb_internal._hyper_1_17_chunk m2_3 + Delete on _timescaledb_internal._hyper_1_25_chunk m2_4 + -> Custom Scan (ChunkAppend) on public.metrics_int2 m2 (actual rows=4 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.tableoid, m2_1.ctid + Index Cond: (m2_1."time" < length(version())) + -> Index Scan using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.tableoid, m2_2.ctid + Index Cond: (m2_2."time" < length(version())) + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.tableoid, m2_3.ctid + Index Cond: (m2_3."time" < length(version())) + -> Index Scan using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.tableoid, m2_4.ctid + Index Cond: (m2_4."time" < length(version())) +(45 rows) + +ROLLBACK; +BEGIN; +:PREFIX WITH u AS (UPDATE metrics_int4 m4 SET data = 'cte update' WHERE m4.time < length(version()) RETURNING time) +SELECT * FROM u; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------ + CTE Scan on u (actual rows=4 loops=1) + Output: u."time" + CTE u + -> Custom Scan (HypertableModify) (actual rows=4 loops=1) + Output: m4_1."time" + -> Update on public.metrics_int4 m4 (actual rows=4 loops=1) + Output: m4_1."time" + Update on _timescaledb_internal._hyper_2_2_chunk m4_1 + Update on _timescaledb_internal._hyper_2_10_chunk m4_2 + Update on _timescaledb_internal._hyper_2_18_chunk m4_3 + Update on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Result (actual rows=4 loops=1) + Output: 'cte update'::text, m4.tableoid, m4.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int4 m4 (actual rows=4 loops=1) + Output: m4.tableoid, m4.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, m4_4.ctid + Index Cond: (m4_4."time" < length(version())) +(30 rows) + +ROLLBACK; +BEGIN; +:PREFIX WITH u AS (UPDATE metrics_int4 m4 SET data = 'cte update 1' WHERE m4.time < length(version())) +UPDATE metrics_int2 m2 SET data = 'cte update 2' WHERE m2.time < 
length(version()); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + CTE u + -> Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_int4 m4 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_2_2_chunk m4_1 + Update on _timescaledb_internal._hyper_2_10_chunk m4_2 + Update on _timescaledb_internal._hyper_2_18_chunk m4_3 + Update on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Result (actual rows=4 loops=1) + Output: 'cte update 1'::text, m4.tableoid, m4.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int4 m4 (actual rows=4 loops=1) + Output: m4.tableoid, m4.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, m4_4.ctid + Index Cond: (m4_4."time" < length(version())) + -> Update on public.metrics_int2 m2 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_1_1_chunk m2_1 + Update on _timescaledb_internal._hyper_1_9_chunk m2_2 + Update on _timescaledb_internal._hyper_1_17_chunk m2_3 + Update on _timescaledb_internal._hyper_1_25_chunk m2_4 + -> Result (actual rows=4 loops=1) + Output: 'cte update 2'::text, m2.tableoid, m2.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int2 m2 (actual rows=4 loops=1) + Output: m2.tableoid, m2.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.tableoid, m2_1.ctid + Index Cond: (m2_1."time" < length(version())) + -> Index Scan using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.tableoid, m2_2.ctid + Index Cond: (m2_2."time" < length(version())) + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.tableoid, m2_3.ctid + Index Cond: (m2_3."time" < length(version())) + -> Index Scan using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.tableoid, m2_4.ctid + Index Cond: (m2_4."time" < length(version())) +(51 rows) + +ROLLBACK; +BEGIN; +:PREFIX WITH d AS (DELETE FROM metrics_int4 m4 WHERE m4.time < length(version()) RETURNING time) +UPDATE metrics_int2 m2 SET data = 'cte update' WHERE m2.time < length(version()); + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + CTE d + -> Custom Scan (HypertableModify) (actual rows=4 loops=1) + Output: m4_1."time" + -> Delete on public.metrics_int4 m4 (actual rows=4 loops=1) + Output: m4_1."time" + Delete on _timescaledb_internal._hyper_2_2_chunk m4_1 + Delete on _timescaledb_internal._hyper_2_10_chunk m4_2 + Delete on _timescaledb_internal._hyper_2_18_chunk m4_3 + Delete on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Custom Scan (ChunkAppend) on public.metrics_int4 m4 (actual rows=4 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, m4_4.ctid + Index Cond: (m4_4."time" < length(version())) + -> Update on public.metrics_int2 m2 (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_1_1_chunk m2_1 + Update on _timescaledb_internal._hyper_1_9_chunk m2_2 + Update on _timescaledb_internal._hyper_1_17_chunk m2_3 + Update on _timescaledb_internal._hyper_1_25_chunk m2_4 + -> Result (actual rows=4 loops=1) + Output: 'cte update'::text, m2.tableoid, m2.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int2 m2 (actual rows=4 loops=1) + Output: m2.tableoid, m2.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.tableoid, m2_1.ctid + Index Cond: (m2_1."time" < length(version())) + -> Index Scan using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.tableoid, m2_2.ctid + Index Cond: (m2_2."time" < length(version())) + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.tableoid, m2_3.ctid + Index Cond: (m2_3."time" < length(version())) + -> Index Scan using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.tableoid, m2_4.ctid + Index Cond: (m2_4."time" < length(version())) +(50 rows) + +ROLLBACK; +BEGIN; +:PREFIX WITH u AS (UPDATE metrics_int4 m4 SET data = 'cte update' WHERE m4.time < length(version()) RETURNING time) +DELETE FROM metrics_int2 m2 WHERE m2.time < length(version()); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + CTE u + -> 
Custom Scan (HypertableModify) (actual rows=4 loops=1) + Output: m4_1."time" + -> Update on public.metrics_int4 m4 (actual rows=4 loops=1) + Output: m4_1."time" + Update on _timescaledb_internal._hyper_2_2_chunk m4_1 + Update on _timescaledb_internal._hyper_2_10_chunk m4_2 + Update on _timescaledb_internal._hyper_2_18_chunk m4_3 + Update on _timescaledb_internal._hyper_2_26_chunk m4_4 + -> Result (actual rows=4 loops=1) + Output: 'cte update'::text, m4.tableoid, m4.ctid + -> Custom Scan (ChunkAppend) on public.metrics_int4 m4 (actual rows=4 loops=1) + Output: m4.tableoid, m4.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_2_2_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_2_chunk m4_1 (actual rows=1 loops=1) + Output: m4_1.tableoid, m4_1.ctid + Index Cond: (m4_1."time" < length(version())) + -> Index Scan using _hyper_2_10_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_10_chunk m4_2 (actual rows=1 loops=1) + Output: m4_2.tableoid, m4_2.ctid + Index Cond: (m4_2."time" < length(version())) + -> Index Scan using _hyper_2_18_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_18_chunk m4_3 (actual rows=1 loops=1) + Output: m4_3.tableoid, m4_3.ctid + Index Cond: (m4_3."time" < length(version())) + -> Index Scan using _hyper_2_26_chunk_metrics_int4_time_idx on _timescaledb_internal._hyper_2_26_chunk m4_4 (actual rows=1 loops=1) + Output: m4_4.tableoid, m4_4.ctid + Index Cond: (m4_4."time" < length(version())) + -> Delete on public.metrics_int2 m2 (actual rows=0 loops=1) + Delete on _timescaledb_internal._hyper_1_1_chunk m2_1 + Delete on _timescaledb_internal._hyper_1_9_chunk m2_2 + Delete on _timescaledb_internal._hyper_1_17_chunk m2_3 + Delete on _timescaledb_internal._hyper_1_25_chunk m2_4 + -> Custom Scan (ChunkAppend) on public.metrics_int2 m2 (actual rows=4 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 0 + -> Index Scan using _hyper_1_1_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_1_chunk m2_1 (actual rows=1 loops=1) + Output: m2_1.tableoid, m2_1.ctid + Index Cond: (m2_1."time" < length(version())) + -> Index Scan using _hyper_1_9_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_9_chunk m2_2 (actual rows=1 loops=1) + Output: m2_2.tableoid, m2_2.ctid + Index Cond: (m2_2."time" < length(version())) + -> Index Scan using _hyper_1_17_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_17_chunk m2_3 (actual rows=1 loops=1) + Output: m2_3.tableoid, m2_3.ctid + Index Cond: (m2_3."time" < length(version())) + -> Index Scan using _hyper_1_25_chunk_metrics_int2_time_idx on _timescaledb_internal._hyper_1_25_chunk m2_4 (actual rows=1 loops=1) + Output: m2_4.tableoid, m2_4.ctid + Index Cond: (m2_4."time" < length(version())) +(50 rows) + +ROLLBACK; +-- test interaction with compression +-- with chunk exclusion for compressed chunks operations that would +-- error because they hit compressed chunks before can succeed now +-- if those chunks get excluded +-- delete from uncompressed chunks with non-immutable constraints +BEGIN; +:PREFIX DELETE FROM metrics_compressed WHERE time > '2005-01-01'::text::timestamptz; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on public.metrics_compressed (actual rows=0 
loops=1) + Delete on _timescaledb_internal._hyper_8_33_chunk metrics_compressed + Delete on _timescaledb_internal._hyper_8_35_chunk metrics_compressed_1 + Delete on _timescaledb_internal._hyper_8_36_chunk metrics_compressed_2 + Delete on _timescaledb_internal._hyper_8_37_chunk metrics_compressed_3 + -> Custom Scan (ChunkAppend) on public.metrics_compressed (actual rows=3 loops=1) + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 1 + -> Index Scan using _hyper_8_35_chunk_metrics_compressed_time_idx on _timescaledb_internal._hyper_8_35_chunk metrics_compressed_1 (actual rows=1 loops=1) + Output: metrics_compressed_1.tableoid, metrics_compressed_1.ctid + Index Cond: (metrics_compressed_1."time" > ('2005-01-01'::cstring)::timestamp with time zone) + -> Index Scan using _hyper_8_36_chunk_metrics_compressed_time_idx on _timescaledb_internal._hyper_8_36_chunk metrics_compressed_2 (actual rows=1 loops=1) + Output: metrics_compressed_2.tableoid, metrics_compressed_2.ctid + Index Cond: (metrics_compressed_2."time" > ('2005-01-01'::cstring)::timestamp with time zone) + -> Index Scan using _hyper_8_37_chunk_metrics_compressed_time_idx on _timescaledb_internal._hyper_8_37_chunk metrics_compressed_3 (actual rows=1 loops=1) + Output: metrics_compressed_3.tableoid, metrics_compressed_3.ctid + Index Cond: (metrics_compressed_3."time" > ('2005-01-01'::cstring)::timestamp with time zone) +(19 rows) + +ROLLBACK; +-- update uncompressed chunks with non-immutable constraints +BEGIN; +:PREFIX UPDATE metrics_compressed SET value = 2 * value WHERE time > '2005-01-01'::text::timestamptz; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on public.metrics_compressed (actual rows=0 loops=1) + Update on _timescaledb_internal._hyper_8_33_chunk metrics_compressed + Update on _timescaledb_internal._hyper_8_35_chunk metrics_compressed_1 + Update on _timescaledb_internal._hyper_8_36_chunk metrics_compressed_2 + Update on _timescaledb_internal._hyper_8_37_chunk metrics_compressed_3 + -> Result (actual rows=3 loops=1) + Output: ('2'::double precision * metrics_compressed.value), metrics_compressed.tableoid, metrics_compressed.ctid + -> Custom Scan (ChunkAppend) on public.metrics_compressed (actual rows=3 loops=1) + Output: metrics_compressed.value, metrics_compressed.tableoid, metrics_compressed.ctid + Startup Exclusion: true + Runtime Exclusion: false + Chunks excluded during startup: 1 + -> Index Scan using _hyper_8_35_chunk_metrics_compressed_time_idx on _timescaledb_internal._hyper_8_35_chunk metrics_compressed_1 (actual rows=1 loops=1) + Output: metrics_compressed_1.value, metrics_compressed_1.tableoid, metrics_compressed_1.ctid + Index Cond: (metrics_compressed_1."time" > ('2005-01-01'::cstring)::timestamp with time zone) + -> Index Scan using _hyper_8_36_chunk_metrics_compressed_time_idx on _timescaledb_internal._hyper_8_36_chunk metrics_compressed_2 (actual rows=1 loops=1) + Output: metrics_compressed_2.value, metrics_compressed_2.tableoid, metrics_compressed_2.ctid + Index Cond: (metrics_compressed_2."time" > ('2005-01-01'::cstring)::timestamp with time zone) + -> Index Scan using _hyper_8_37_chunk_metrics_compressed_time_idx on _timescaledb_internal._hyper_8_37_chunk metrics_compressed_3 (actual rows=1 loops=1) + Output: metrics_compressed_3.value, 
metrics_compressed_3.tableoid, metrics_compressed_3.ctid + Index Cond: (metrics_compressed_3."time" > ('2005-01-01'::cstring)::timestamp with time zone) +(22 rows) + +ROLLBACK; diff --git a/tsl/test/expected/plan_skip_scan-16.out b/tsl/test/expected/plan_skip_scan-16.out new file mode 100644 index 00000000000..14a2112598b --- /dev/null +++ b/tsl/test/expected/plan_skip_scan-16.out @@ -0,0 +1,3979 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- need superuser to modify statistics +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\ir include/skip_scan_load.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE TABLE skip_scan(time int, dev int, dev_name text, val int); +INSERT INTO skip_scan SELECT t, d, 'device_' || d::text, t*d FROM generate_series(1, 1000) t, generate_series(1, 10) d; +INSERT INTO skip_scan VALUES (NULL, 0, -1, NULL), (0, NULL, -1, NULL); +INSERT INTO skip_scan(time,dev,dev_name,val) SELECT t, NULL, NULL, NULL FROM generate_series(0, 999, 50) t; +ANALYZE skip_scan; +CREATE TABLE skip_scan_nulls(time int); +CREATE INDEX ON skip_scan_nulls(time); +INSERT INTO skip_scan_nulls SELECT NULL FROM generate_series(1,100); +ANALYZE skip_scan_nulls; +-- create hypertable with different physical layouts in the chunks +CREATE TABLE skip_scan_ht(f1 int, f2 int, f3 int, time int NOT NULL, dev int, dev_name text, val int); +SELECT create_hypertable('skip_scan_ht', 'time', chunk_time_interval => 250, create_default_indexes => false); + create_hypertable +--------------------------- + (1,public,skip_scan_ht,t) +(1 row) + +INSERT INTO skip_scan_ht(time,dev,dev_name,val) SELECT t, d, 'device_' || d::text, random() FROM generate_series(0, 249) t, generate_series(1, 10) d; +ALTER TABLE skip_scan_ht DROP COLUMN f1; +INSERT INTO skip_scan_ht(time,dev,dev_name,val) SELECT t, d, 'device_' || d::text, random() FROM generate_series(250, 499) t, generate_series(1, 10) d; +ALTER TABLE skip_scan_ht DROP COLUMN f2; +INSERT INTO skip_scan_ht(time,dev,dev_name,val) SELECT t, d, 'device_' || d::text, random() FROM generate_series(500, 749) t, generate_series(1, 10) d; +ALTER TABLE skip_scan_ht DROP COLUMN f3; +INSERT INTO skip_scan_ht(time,dev,dev_name,val) SELECT t, d, 'device_' || d::text, random() FROM generate_series(750, 999) t, generate_series(1, 10) d; +INSERT INTO skip_scan_ht(time,dev,dev_name,val) SELECT t, NULL, NULL, NULL FROM generate_series(0, 999, 50) t; +ANALYZE skip_scan_ht; +ALTER TABLE skip_scan_ht SET (timescaledb.compress,timescaledb.compress_orderby='time desc', timescaledb.compress_segmentby='dev'); +CREATE TABLE skip_scan_insert(time int, dev int, dev_name text, val int, query text); +CREATE OR REPLACE FUNCTION int_func_immutable() RETURNS int LANGUAGE SQL IMMUTABLE SECURITY DEFINER AS $$SELECT 1; $$; +CREATE OR REPLACE FUNCTION int_func_stable() RETURNS int LANGUAGE SQL STABLE SECURITY DEFINER AS $$ SELECT 2; $$; +CREATE OR REPLACE FUNCTION int_func_volatile() RETURNS int LANGUAGE SQL VOLATILE SECURITY DEFINER AS $$ SELECT 3; $$; +CREATE OR REPLACE FUNCTION inta_func_immutable() RETURNS int[] LANGUAGE SQL IMMUTABLE SECURITY DEFINER AS $$ SELECT ARRAY[1,2,3]; $$; +CREATE OR REPLACE FUNCTION inta_func_stable() RETURNS int[] LANGUAGE SQL STABLE SECURITY DEFINER AS $$ SELECT ARRAY[2,3,4]; $$; +CREATE OR 
REPLACE FUNCTION inta_func_volatile() RETURNS int[] LANGUAGE SQL VOLATILE SECURITY DEFINER AS $$ SELECT ARRAY[3,4,5]; $$; +-- adjust statistics so we get skipscan plans +UPDATE pg_statistic SET stadistinct=1, stanullfrac=0.5 WHERE starelid='skip_scan'::regclass; +UPDATE pg_statistic SET stadistinct=1, stanullfrac=0.5 WHERE starelid='skip_scan_nulls'::regclass; +UPDATE pg_statistic SET stadistinct=1, stanullfrac=0.5 WHERE starelid='skip_scan_ht'::regclass; +UPDATE pg_statistic SET stadistinct=1, stanullfrac=0.5 WHERE starelid IN (select inhrelid from pg_inherits where inhparent='skip_scan_ht'::regclass); +-- we want to run with analyze here so we can see counts in the nodes +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set TABLE skip_scan +\ir include/skip_scan_query.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- canary for result diff +SELECT current_setting('timescaledb.enable_skipscan') AS enable_skipscan; + enable_skipscan +----------------- + on +(1 row) + +-- test different index configurations +-- no index so we cant do SkipScan +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------- + Sort (actual rows=12 loops=1) + Sort Key: dev + Sort Method: quicksort + -> HashAggregate (actual rows=12 loops=1) + Group Key: dev + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(7 rows) + +-- NULLS LAST index on dev +CREATE INDEX skip_scan_idx_dev_nulls_last ON :TABLE(dev); +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_idx_dev_nulls_last on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan Backward using skip_scan_idx_dev_nulls_last on skip_scan (actual rows=12 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 12 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_idx_dev_nulls_last on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +DROP INDEX skip_scan_idx_dev_nulls_last; +-- NULLS FIRST index on dev +CREATE INDEX skip_scan_idx_dev_nulls_first ON :TABLE(dev NULLS FIRST); +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_idx_dev_nulls_first on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 
+(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_idx_dev_nulls_first on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +DROP INDEX skip_scan_idx_dev_nulls_first; +-- multicolumn index with dev as leading column +CREATE INDEX skip_scan_idx_dev_time_idx ON :TABLE(dev, time); +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_idx_dev_time_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_idx_dev_time_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev DESC, time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan Backward using skip_scan_idx_dev_time_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 12 +(5 rows) + +DROP INDEX skip_scan_idx_dev_time_idx; +-- multicolumn index with dev as non-leading column +CREATE INDEX skip_scan_idx_time_dev_idx ON :TABLE(time, dev); +:PREFIX SELECT DISTINCT dev FROM :TABLE WHERE time = 100 ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=11 loops=1) + -> Index Only Scan using skip_scan_idx_time_dev_idx on skip_scan (actual rows=11 loops=1) + Index Cond: (("time" = 100) AND (dev > NULL::integer)) + Heap Fetches: 11 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time = 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=11 loops=1) + -> Index Only Scan using skip_scan_idx_time_dev_idx on skip_scan (actual rows=11 loops=1) + Index Cond: (("time" = 100) AND (dev > NULL::integer)) + Heap Fetches: 11 +(5 rows) + +DROP INDEX skip_scan_idx_time_dev_idx; +-- hash index is not ordered so can't use skipscan +CREATE INDEX skip_scan_idx_hash ON :TABLE USING hash(dev_name); +:PREFIX SELECT DISTINCT dev_name FROM :TABLE WHERE dev_name IN ('device_1','device_2') ORDER BY dev_name; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort (actual rows=2 loops=1) + Sort Key: dev_name + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: dev_name + Batches: 1 
+ -> Bitmap Heap Scan on skip_scan (actual rows=2000 loops=1) + Recheck Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + Heap Blocks: exact=13 + -> Bitmap Index Scan on skip_scan_idx_hash (actual rows=2000 loops=1) + Index Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) +(11 rows) + +DROP INDEX skip_scan_idx_hash; +-- expression indexes +-- currently not supported by skipscan +CREATE INDEX skip_scan_expr_idx ON :TABLE((dev % 3)); +:PREFIX SELECT DISTINCT dev%3 FROM :TABLE ORDER BY dev%3; + QUERY PLAN +--------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Sort Key: ((dev % 3)) + Sort Method: quicksort + -> HashAggregate (actual rows=4 loops=1) + Group Key: (dev % 3) + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(7 rows) + +:PREFIX SELECT DISTINCT ON (dev%3) dev FROM :TABLE ORDER BY dev%3; + QUERY PLAN +------------------------------------------------------------------------------------ + Unique (actual rows=4 loops=1) + -> Index Scan using skip_scan_expr_idx on skip_scan (actual rows=10022 loops=1) +(2 rows) + +DROP INDEX skip_scan_expr_idx; +CREATE INDEX ON :TABLE(dev_name); +CREATE INDEX ON :TABLE(dev); +CREATE INDEX ON :TABLE(dev, time); +CREATE INDEX ON :TABLE(time,dev); +CREATE INDEX ON :TABLE(time,dev,val); +\qecho basic DISTINCT queries on :TABLE +basic DISTINCT queries on skip_scan +:PREFIX SELECT DISTINCT dev, 'q1_1' FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT dev_name, 'q1_2' FROM :TABLE ORDER BY dev_name; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT dev, 'q1_3', NULL FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +\qecho stable expression in targetlist on :TABLE +stable expression in targetlist on skip_scan +:PREFIX SELECT DISTINCT dev, 'q1_4', length(md5(now()::text)) FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT dev_name, 'q1_5', length(md5(now()::text)) FROM :TABLE ORDER BY dev_name; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +-- volatile expression in targetlist +:PREFIX SELECT DISTINCT dev, 'q1_6', length(md5(random()::text)) FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Sort (actual rows=10022 loops=1) + Sort Key: dev, (length(md5((random())::text))) + Sort Method: quicksort + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(5 rows) + +:PREFIX SELECT DISTINCT dev_name, 'q1_7', length(md5(random()::text)) FROM :TABLE ORDER BY dev_name; + QUERY PLAN +--------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Sort (actual rows=10022 loops=1) + Sort Key: dev_name, (length(md5((random())::text))) + Sort Method: quicksort + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(5 rows) + +-- queries without skipscan because distinct is not limited to specific column +:PREFIX SELECT DISTINCT * FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------- + Sort (actual rows=10022 loops=1) + Sort Key: dev + Sort Method: quicksort + -> HashAggregate (actual rows=10022 loops=1) + Group Key: dev, "time", dev_name, val + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(7 rows) + +:PREFIX SELECT DISTINCT *, 'q1_9' FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------- + Sort (actual rows=10022 loops=1) + Sort Key: dev + Sort Method: quicksort + -> HashAggregate (actual rows=10022 loops=1) + Group Key: dev, "time", dev_name, val + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(7 rows) + +:PREFIX SELECT DISTINCT dev, time, 'q1_10' FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------- + Sort (actual rows=10021 loops=1) + Sort Key: dev + Sort Method: quicksort + -> HashAggregate (actual rows=10021 loops=1) + Group Key: dev, "time" + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(7 rows) + +:PREFIX SELECT DISTINCT dev, NULL, 'q1_11' FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +-- distinct on expressions not supported +:PREFIX SELECT DISTINCT time_bucket(10,time), 'q1_12' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------- + HashAggregate (actual rows=102 loops=1) + Group Key: time_bucket(10, "time") + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT length(dev_name), 'q1_13' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------- + HashAggregate (actual rows=4 loops=1) + Group Key: length(dev_name) + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT 
3*time, 'q1_14' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------- + HashAggregate (actual rows=1002 loops=1) + Group Key: (3 * "time") + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT 'Device ' || dev_name FROM :TABLE; + QUERY PLAN +--------------------------------------------------------- + HashAggregate (actual rows=12 loops=1) + Group Key: ('Device '::text || dev_name) + Batches: 1 + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(4 rows) + +-- DISTINCT ON queries +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_2' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_3', NULL FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_4', length(md5(now()::text)) FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_5', length(md5(random()::text)) FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE; + QUERY PLAN +-------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(3 rows) + +:PREFIX SELECT DISTINCT ON (dev) *, 'q2_7' FROM :TABLE; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on 
skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, time, 'q2_8' FROM :TABLE; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, NULL, 'q2_9' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) time, 'q2_10' FROM :TABLE ORDER by dev, time; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, tableoid::regclass, 'q2_11' FROM :TABLE; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, int_func_immutable(), 'q2_12' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, int_func_stable(), 'q2_13' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, int_func_volatile(), 'q2_14' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +-- DISTINCT ON queries on TEXT column +:PREFIX SELECT DISTINCT ON (dev_name) dev_name FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 
loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_2' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_3', NULL FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_4', length(md5(now()::text)) FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_5', length(md5(random()::text)) FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) * FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) +(3 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) *, 'q3_7' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, time, 'q3_8' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, NULL, 'q3_9' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + 
-> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) time, 'q3_10' FROM :TABLE ORDER by dev_name, time; + QUERY PLAN +--------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Sort (actual rows=10022 loops=1) + Sort Key: dev_name, "time" + Sort Method: quicksort + -> Seq Scan on skip_scan (actual rows=10022 loops=1) +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, tableoid::regclass, 'q3_11' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev_name::varchar) dev_name::varchar FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev, int_func_immutable(), 'q3_13' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev, int_func_stable(), 'q3_14' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev, int_func_volatile(), 'q3_15' FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +\qecho DISTINCT with wholerow var +DISTINCT with wholerow var +:PREFIX SELECT DISTINCT ON (dev) :TABLE FROM :TABLE; + QUERY PLAN +-------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(3 rows) + +-- should not use SkipScan since we only support SkipScan on single-column distinct +:PREFIX SELECT DISTINCT :TABLE FROM :TABLE; + QUERY PLAN +--------------------------------------------------------- + HashAggregate (actual rows=10022 loops=1) + Group Key: skip_scan.* + Batches: 1 + -> Seq Scan on skip_scan 
(actual rows=10022 loops=1) +(4 rows) + +\qecho LIMIT queries on :TABLE +LIMIT queries on skip_scan +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE LIMIT 3; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=3 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev DESC, time DESC LIMIT 3; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=3 loops=1) + -> Index Only Scan Backward using skip_scan_dev_time_idx on skip_scan (actual rows=3 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 3 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev, time LIMIT 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=3 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 +(6 rows) + +\qecho range queries on :TABLE +range queries on skip_scan +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time BETWEEN 100 AND 300; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=11 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=11 loops=1) + Index Cond: ((dev > NULL::integer) AND ("time" >= 100) AND ("time" <= 300)) + Heap Fetches: 11 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time < 200; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=11 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=11 loops=1) + Index Cond: ((dev > NULL::integer) AND ("time" < 200)) + Heap Fetches: 11 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time > 800; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=11 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=11 loops=1) + Index Cond: ((dev > NULL::integer) AND ("time" > 800)) + Heap Fetches: 11 +(5 rows) + +\qecho ordered append on :TABLE +ordered append on skip_scan +:PREFIX SELECT * FROM :TABLE ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------- + Index Scan using skip_scan_time_dev_idx on skip_scan (actual rows=10022 loops=1) +(1 row) + +:PREFIX SELECT DISTINCT ON (time) time FROM :TABLE WHERE time BETWEEN 0 AND 5000; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Unique (actual rows=1001 loops=1) + -> Custom Scan 
(SkipScan) on skip_scan (actual rows=1001 loops=1) + -> Index Only Scan using skip_scan_time_dev_idx on skip_scan (actual rows=1001 loops=1) + Index Cond: (("time" > NULL::integer) AND ("time" >= 0) AND ("time" <= 5000)) + Heap Fetches: 1001 +(5 rows) + +\qecho SUBSELECTS on :TABLE +SUBSELECTS on skip_scan +:PREFIX SELECT time, dev, val, 'q4_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=12 loops=1) + -> Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(5 rows) + +:PREFIX SELECT NULL, dev, NULL, 'q4_3' FROM (SELECT DISTINCT ON (dev) dev FROM :TABLE) a; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(6 rows) + +:PREFIX SELECT time, dev, NULL, 'q4_4' FROM (SELECT DISTINCT ON (dev) dev, time FROM :TABLE) a; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(4 rows) + +\qecho ORDER BY +ORDER BY +:PREFIX SELECT time, dev, val, 'q5_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE ORDER BY dev, time) a; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=12 loops=1) + -> Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_time_idx on skip_scan (actual rows=12 loops=1) +(5 rows) + +:PREFIX SELECT time, dev, val, 'q5_2' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE ORDER BY dev DESC, time DESC) a; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=12 loops=1) + -> Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan Backward using skip_scan_dev_time_idx on skip_scan (actual rows=12 loops=1) +(5 rows) + +\qecho WHERE CLAUSES +WHERE CLAUSES +:PREFIX SELECT time, dev, val, 'q6_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > 5) a; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=5 loops=1) + -> Result (actual rows=5 loops=1) + -> Unique (actual rows=5 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=5 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=5 loops=1) + Index Cond: (dev > 5) +(6 rows) + +:PREFIX SELECT time, dev, val, 'q6_2' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE time > 5) a; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=11 loops=1) + -> Index Scan using skip_scan_dev_time_idx on skip_scan (actual rows=11 loops=1) + Index Cond: ("time" > 5) +(6 rows) + +:PREFIX SELECT time, dev, val, 'q6_3' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a WHERE dev > 5; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=5 loops=1) + -> Result (actual rows=5 loops=1) + -> Unique (actual rows=5 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=5 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=5 loops=1) + Index Cond: (dev > 5) +(6 rows) + +:PREFIX SELECT time, dev, val, 'q6_4' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a WHERE time > 5; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=0 loops=1) + Filter: (a."time" > 5) + Rows Removed by Filter: 12 + -> Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(7 rows) + +--\qecho immutable func in WHERE clause on :TABLE +:PREFIX SELECT DISTINCT ON (dev) *, 'q6_5' FROM :TABLE WHERE dev > int_func_immutable(); + QUERY PLAN +------------------------------------------------------------------------------------------- + Result (actual rows=9 loops=1) + -> Unique (actual rows=9 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=9 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=9 loops=1) + Index Cond: (dev > 1) +(5 rows) + +--\qecho stable func in WHERE clause on :TABLE +:PREFIX SELECT DISTINCT ON (dev) *, 'q6_6' FROM :TABLE WHERE dev > int_func_stable(); + QUERY PLAN +------------------------------------------------------------------------------------------- + Result (actual rows=8 loops=1) + -> Unique (actual rows=8 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=8 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=8 loops=1) + Index Cond: (dev > int_func_stable()) +(5 rows) + +--\qecho volatile func in WHERE clause on :TABLE +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > int_func_volatile(); + QUERY PLAN +------------------------------------------------------------------------------------- + Unique (actual rows=7 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=7 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=7 loops=1) + Filter: (dev > int_func_volatile()) + Rows Removed by Filter: 3022 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = ANY(inta_func_immutable()); + QUERY PLAN +------------------------------------------------------------------------------------- + Unique (actual rows=3 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=3 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=3 loops=1) + Index Cond: (dev = ANY ('{1,2,3}'::integer[])) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = ANY(inta_func_stable()); + QUERY PLAN 
+------------------------------------------------------------------------------------- + Unique (actual rows=3 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=3 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=3 loops=1) + Index Cond: (dev = ANY (inta_func_stable())) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = ANY(inta_func_volatile()); + QUERY PLAN +------------------------------------------------------------------------------------- + Unique (actual rows=3 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=3 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=3 loops=1) + Filter: (dev = ANY (inta_func_volatile())) + Rows Removed by Filter: 7022 +(5 rows) + +-- RowCompareExpr +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE (dev, time) > (5,100); + QUERY PLAN +------------------------------------------------------------------------------------------ + Unique (actual rows=6 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=6 loops=1) + -> Index Scan using skip_scan_dev_time_idx on skip_scan (actual rows=6 loops=1) + Index Cond: (ROW(dev, "time") > ROW(5, 100)) +(4 rows) + +-- always false expr similar to our initial skip qual +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > NULL; + QUERY PLAN +---------------------------------------------- + Unique (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: dev + Sort Method: quicksort + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(6 rows) + +-- no tuples matching +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > 20; + QUERY PLAN +------------------------------------------------------------------------------------- + Unique (actual rows=0 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=0 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=0 loops=1) + Index Cond: (dev > 20) +(4 rows) + +-- multiple constraints in WHERE clause +:PREFIX SELECT DISTINCT ON (dev) dev,time FROM :TABLE WHERE dev > 5 AND time = 100; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Unique (actual rows=5 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=5 loops=1) + -> Index Only Scan using skip_scan_time_dev_val_idx on skip_scan (actual rows=5 loops=1) + Index Cond: (("time" = 100) AND (dev > NULL::integer) AND (dev > 5)) + Heap Fetches: 5 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev,time FROM :TABLE WHERE dev > 5 AND time > 200; + QUERY PLAN +------------------------------------------------------------------------------------- + Unique (actual rows=5 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=5 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=5 loops=1) + Index Cond: (dev > 5) + Filter: ("time" > 200) + Rows Removed by Filter: 1000 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev,time FROM :TABLE WHERE dev >= 5 AND dev < 7 AND dev >= 2; + QUERY PLAN +------------------------------------------------------------------------------------- + Unique (actual rows=2 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=2 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=2 loops=1) + Index Cond: ((dev >= 5) AND (dev < 7) AND (dev >= 2)) +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev,time,val FROM :TABLE WHERE time > 100 AND time < 200 AND val > 10 AND val < 10000 AND dev > 2 AND dev < 7 ORDER BY 
dev,time; + QUERY PLAN +------------------------------------------------------------------------------------------- + Unique (actual rows=4 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=4 loops=1) + -> Index Scan using skip_scan_dev_time_idx on skip_scan (actual rows=4 loops=1) + Index Cond: ((dev > 2) AND (dev < 7) AND ("time" > 100) AND ("time" < 200)) + Filter: ((val > 10) AND (val < 10000)) +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE dev IS NULL; + QUERY PLAN +------------------------------------------------------------------------------------------ + Unique (actual rows=1 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=1 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=1 loops=1) + Index Cond: ((dev > NULL::integer) AND (dev IS NULL)) + Heap Fetches: 1 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name FROM :TABLE WHERE dev_name IS NULL; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Unique (actual rows=1 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=1 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=1 loops=1) + Index Cond: ((dev_name > NULL::text) AND (dev_name IS NULL)) + Heap Fetches: 1 +(5 rows) + +-- test constants in ORDER BY +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = 1 ORDER BY dev, time DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Index Scan Backward using skip_scan_dev_time_idx on skip_scan (actual rows=1 loops=1) + Index Cond: (dev = 1) +(3 rows) + +-- CTE +:PREFIX WITH devices AS ( + SELECT DISTINCT ON (dev) dev FROM :TABLE +) +SELECT * FROM devices; + QUERY PLAN +------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +:PREFIX WITH devices AS ( + SELECT DISTINCT dev FROM :TABLE +) +SELECT * FROM devices ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +-- prepared statements +PREPARE prep AS SELECT DISTINCT ON (dev_name) dev_name FROM :TABLE; +:PREFIX EXECUTE prep; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(5 rows) + +:PREFIX EXECUTE prep; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(5 rows) + +:PREFIX EXECUTE prep; + QUERY PLAN 
+------------------------------------------------------------------------------------------------ + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 12 +(5 rows) + +DEALLOCATE prep; +-- ReScan tests +:PREFIX SELECT time, dev, val, 'q7_1' FROM (SELECT DISTINCT ON (dev) * FROM ( + VALUES (1), (2)) a(v), + LATERAL (SELECT * FROM :TABLE WHERE time != a.v) b) a; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Nested Loop (actual rows=20022 loops=1) + Join Filter: (skip_scan."time" <> "*VALUES*".column1) + Rows Removed by Join Filter: 22 + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=10022 loops=1) + -> Materialize (actual rows=2 loops=10022) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) +(8 rows) + +:PREFIX SELECT time, dev, val, 'q7_2' FROM (SELECT * FROM ( + VALUES (1), (2)) a(v), + LATERAL (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev != a.v) b) a; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=20 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Result (actual rows=10 loops=2) + -> Unique (actual rows=10 loops=2) + -> Custom Scan (SkipScan) on skip_scan (actual rows=10 loops=2) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=10 loops=2) + Filter: (dev <> "*VALUES*".column1) + Rows Removed by Filter: 1021 +(8 rows) + +-- RuntimeKeys +:PREFIX SELECT time, dev, val, 'q8_1' FROM (SELECT * FROM ( + VALUES (1), (2)) a(v), + LATERAL (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev >= a.v) b) c; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Result (actual rows=10 loops=2) + -> Unique (actual rows=10 loops=2) + -> Custom Scan (SkipScan) on skip_scan (actual rows=10 loops=2) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=10 loops=2) + Index Cond: (dev >= "*VALUES*".column1) +(7 rows) + +-- Emulate multi-column DISTINCT using multiple SkipSkans +:PREFIX SELECT time, dev, val, 'q9_1' FROM (SELECT b.* FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) * FROM :TABLE WHERE dev = a.dev) b) c; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=10001 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 + -> Result (actual rows=833 loops=12) + -> Unique (actual rows=833 loops=12) + -> Custom Scan (SkipScan) on skip_scan skip_scan_1 (actual rows=833 loops=12) + -> Index Scan using skip_scan_dev_time_idx on skip_scan skip_scan_1 (actual rows=833 loops=12) + Index Cond: (dev = skip_scan.dev) +(11 rows) + +:PREFIX SELECT time, dev, NULL, 'q9_2' FROM (SELECT b.* FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) dev, time FROM :TABLE WHERE dev = a.dev) b) c; + 
QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=10001 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 + -> Unique (actual rows=833 loops=12) + -> Custom Scan (SkipScan) on skip_scan skip_scan_1 (actual rows=833 loops=12) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan skip_scan_1 (actual rows=833 loops=12) + Index Cond: ((dev = skip_scan.dev) AND ("time" > NULL::integer)) + Heap Fetches: 10001 +(11 rows) + +-- Test that the multi-column DISTINCT emulation is equivalent to a real multi-column DISTINCT +:PREFIX SELECT * FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) dev, time FROM :TABLE WHERE dev = a.dev) b; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=10001 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 + -> Unique (actual rows=833 loops=12) + -> Custom Scan (SkipScan) on skip_scan skip_scan_1 (actual rows=833 loops=12) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan skip_scan_1 (actual rows=833 loops=12) + Index Cond: ((dev = skip_scan.dev) AND ("time" > NULL::integer)) + Heap Fetches: 10001 +(11 rows) + +:PREFIX SELECT DISTINCT ON (dev, time) dev, time FROM :TABLE WHERE dev IS NOT NULL; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Unique (actual rows=10001 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=10001 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 10001 +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev, time) dev, time FROM :TABLE WHERE dev IS NOT NULL +UNION SELECT b.* FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) dev, time FROM :TABLE WHERE dev = a.dev) b; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=10001 loops=1) + -> Sort (actual rows=20002 loops=1) + Sort Key: skip_scan.dev, skip_scan."time" + Sort Method: quicksort + -> Append (actual rows=20002 loops=1) + -> Unique (actual rows=10001 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=10001 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 10001 + -> Nested Loop (actual rows=10001 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan skip_scan_1 (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan skip_scan_1 (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 + -> Unique (actual rows=833 loops=12) + -> Custom Scan (SkipScan) on skip_scan skip_scan_2 (actual rows=833 loops=12) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan skip_scan_2 (actual rows=833 loops=12) + Index Cond: ((dev = skip_scan_1.dev) AND ("time" > NULL::integer)) + Heap Fetches: 10001 +(20 rows) + +-- SkipScan into INSERT +:PREFIX INSERT INTO 
skip_scan_insert(time, dev, val, query) SELECT time, dev, val, 'q10_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Insert on skip_scan_insert (actual rows=0 loops=1) + -> Subquery Scan on a (actual rows=12 loops=1) + -> Result (actual rows=12 loops=1) + -> Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) +(6 rows) + +-- parallel query +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false); + set_config +------------ + on +(1 row) + +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------- + Unique (actual rows=12 loops=1) + -> Custom Scan (SkipScan) on skip_scan (actual rows=12 loops=1) + -> Index Only Scan using skip_scan_dev_idx on skip_scan (actual rows=12 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 12 +(5 rows) + +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'off', false); + set_config +------------ + off +(1 row) + +TRUNCATE skip_scan_insert; +-- table with only nulls +:PREFIX SELECT DISTINCT ON (time) time FROM skip_scan_nulls; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Unique (actual rows=1 loops=1) + -> Custom Scan (SkipScan) on skip_scan_nulls (actual rows=1 loops=1) + -> Index Only Scan using skip_scan_nulls_time_idx on skip_scan_nulls (actual rows=1 loops=1) + Index Cond: ("time" > NULL::integer) + Heap Fetches: 1 +(5 rows) + +-- no tuples in resultset +:PREFIX SELECT DISTINCT ON (time) time FROM skip_scan_nulls WHERE time IS NOT NULL; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Unique (actual rows=0 loops=1) + -> Custom Scan (SkipScan) on skip_scan_nulls (actual rows=0 loops=1) + -> Index Only Scan using skip_scan_nulls_time_idx on skip_scan_nulls (actual rows=0 loops=1) + Index Cond: (("time" > NULL::integer) AND ("time" IS NOT NULL)) + Heap Fetches: 0 +(5 rows) + +\set TABLE skip_scan_ht +\ir include/skip_scan_query.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
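[Editor's aside, not part of the generated expected output above or below.] The plans in this file show TimescaleDB's SkipScan custom node answering `SELECT DISTINCT [ON (dev)] ...` by jumping from one distinct value of the leading index column to the next; the `Index Cond: (dev > NULL::integer)` qual that keeps appearing is, per the test's own comment, the initial always-false skip qual that the node later rebinds to the last value it found. As a rough, hedged sketch of that access pattern only, the same "loose index scan" can be emulated in plain SQL with a recursive CTE over an ordered index on `dev`. The table and column names below assume the `skip_scan` test table used earlier in this file, and NULL groups (which DISTINCT would also return) are deliberately left out and would need separate handling:

WITH RECURSIVE distinct_devs AS (
    -- seed with the smallest dev the index can provide
    (SELECT dev FROM skip_scan ORDER BY dev LIMIT 1)
    UNION ALL
    -- from the previous value, jump straight to the next distinct dev
    SELECT (SELECT s.dev
            FROM skip_scan s
            WHERE s.dev > d.dev
            ORDER BY s.dev
            LIMIT 1)
    FROM distinct_devs d
    WHERE d.dev IS NOT NULL          -- stop once no larger dev exists
)
SELECT dev FROM distinct_devs
WHERE dev IS NOT NULL;               -- drop the trailing NULL produced by the last jump

Each recursion step is one bounded index probe, which is the behaviour the SkipScan node provides natively (and, as the plans above show, per chunk when :TABLE is a hypertable).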
+-- canary for result diff +SELECT current_setting('timescaledb.enable_skipscan') AS enable_skipscan; + enable_skipscan +----------------- + on +(1 row) + +-- test different index configurations +-- no index so we cant do SkipScan +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------------------- + Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk.dev + Sort Method: quicksort + -> HashAggregate (actual rows=11 loops=1) + Group Key: _hyper_1_1_chunk.dev + Batches: 1 + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(11 rows) + +-- NULLS LAST index on dev +CREATE INDEX skip_scan_idx_dev_nulls_last ON :TABLE(dev); +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev DESC + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_1_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_2_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_3_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_4_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_4_chunk (actual rows=11 loops=1) + Index 
Cond: (dev < NULL::integer) + Heap Fetches: 11 +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_idx_dev_nulls_last on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +DROP INDEX skip_scan_idx_dev_nulls_last; +-- NULLS FIRST index on dev +CREATE INDEX skip_scan_idx_dev_nulls_first ON :TABLE(dev NULLS FIRST); +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev NULLS FIRST + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev NULLS FIRST + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using 
_hyper_1_2_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_idx_dev_nulls_first on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +DROP INDEX skip_scan_idx_dev_nulls_first; +-- multicolumn index with dev as leading column +CREATE INDEX skip_scan_idx_dev_time_idx ON :TABLE(dev, time); +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_idx_dev_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_idx_dev_time_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_idx_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_idx_dev_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_idx_dev_time_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_idx_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev DESC, time DESC; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev DESC, _hyper_1_1_chunk."time" DESC + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_1_chunk_skip_scan_idx_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_2_chunk_skip_scan_idx_dev_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_3_chunk_skip_scan_idx_dev_time_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan Backward using _hyper_1_4_chunk_skip_scan_idx_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 11 +(19 rows) + +DROP INDEX skip_scan_idx_dev_time_idx; +-- multicolumn index with dev as non-leading column +CREATE INDEX skip_scan_idx_time_dev_idx ON :TABLE(time, dev); +:PREFIX SELECT DISTINCT dev FROM :TABLE WHERE time = 100 ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_time_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: ("time" = 100) + Heap Fetches: 11 +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time = 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_idx_time_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: ("time" = 100) + Heap Fetches: 11 +(4 rows) + +DROP INDEX skip_scan_idx_time_dev_idx; +-- hash index is not ordered so can't use skipscan +CREATE INDEX skip_scan_idx_hash ON :TABLE USING hash(dev_name); +:PREFIX SELECT DISTINCT dev_name FROM :TABLE WHERE dev_name IN ('device_1','device_2') ORDER BY dev_name; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.dev_name + Batches: 1 + -> Append (actual rows=2000 loops=1) + -> Bitmap Heap Scan on _hyper_1_1_chunk (actual rows=500 loops=1) + Recheck Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + Heap Blocks: exact=4 + -> Bitmap Index Scan on _hyper_1_1_chunk_skip_scan_idx_hash (actual rows=500 loops=1) + Index Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + -> Bitmap Heap Scan on _hyper_1_2_chunk (actual rows=500 loops=1) + Recheck Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + Heap Blocks: exact=4 + -> Bitmap Index Scan on _hyper_1_2_chunk_skip_scan_idx_hash (actual rows=500 loops=1) + Index Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + -> Bitmap Heap Scan on 
_hyper_1_3_chunk (actual rows=500 loops=1) + Recheck Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + Heap Blocks: exact=4 + -> Bitmap Index Scan on _hyper_1_3_chunk_skip_scan_idx_hash (actual rows=500 loops=1) + Index Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + -> Bitmap Heap Scan on _hyper_1_4_chunk (actual rows=500 loops=1) + Recheck Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) + Heap Blocks: exact=4 + -> Bitmap Index Scan on _hyper_1_4_chunk_skip_scan_idx_hash (actual rows=500 loops=1) + Index Cond: (dev_name = ANY ('{device_1,device_2}'::text[])) +(27 rows) + +DROP INDEX skip_scan_idx_hash; +-- expression indexes +-- currently not supported by skipscan +CREATE INDEX skip_scan_expr_idx ON :TABLE((dev % 3)); +:PREFIX SELECT DISTINCT dev%3 FROM :TABLE ORDER BY dev%3; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Sort Key: ((_hyper_1_1_chunk.dev % 3)) + Sort Method: quicksort + -> HashAggregate (actual rows=4 loops=1) + Group Key: (_hyper_1_1_chunk.dev % 3) + Batches: 1 + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev%3) dev FROM :TABLE ORDER BY dev%3; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Unique (actual rows=4 loops=1) + -> Merge Append (actual rows=10020 loops=1) + Sort Key: ((_hyper_1_1_chunk.dev % 3)) + -> Index Scan using _hyper_1_1_chunk_skip_scan_expr_idx on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_expr_idx on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_expr_idx on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_expr_idx on _hyper_1_4_chunk (actual rows=2505 loops=1) +(7 rows) + +DROP INDEX skip_scan_expr_idx; +CREATE INDEX ON :TABLE(dev_name); +CREATE INDEX ON :TABLE(dev); +CREATE INDEX ON :TABLE(dev, time); +CREATE INDEX ON :TABLE(time,dev); +CREATE INDEX ON :TABLE(time,dev,val); +\qecho basic DISTINCT queries on :TABLE +basic DISTINCT queries on skip_scan_ht +:PREFIX SELECT DISTINCT dev, 'q1_1' FROM :TABLE ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom 
Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT dev_name, 'q1_2' FROM :TABLE ORDER BY dev_name; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT dev, 'q1_3', NULL FROM :TABLE ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +\qecho stable expression in targetlist on :TABLE +stable expression in targetlist on skip_scan_ht +:PREFIX SELECT DISTINCT dev, 'q1_4', length(md5(now()::text)) FROM :TABLE ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on 
_hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT dev_name, 'q1_5', length(md5(now()::text)) FROM :TABLE ORDER BY dev_name; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +-- volatile expression in targetlist +:PREFIX SELECT DISTINCT dev, 'q1_6', length(md5(random()::text)) FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Sort (actual rows=10020 loops=1) + Sort Key: _hyper_1_1_chunk.dev, (length(md5((random())::text))) + Sort Method: quicksort + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(10 rows) + +:PREFIX SELECT DISTINCT dev_name, 'q1_7', length(md5(random()::text)) FROM :TABLE ORDER BY dev_name; + QUERY PLAN +--------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Sort (actual rows=10020 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name, (length(md5((random())::text))) + Sort Method: quicksort + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual 
rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(10 rows) + +-- queries without skipscan because distinct is not limited to specific column +:PREFIX SELECT DISTINCT * FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=10020 loops=1) + Sort Key: _hyper_1_1_chunk.dev + Sort Method: quicksort + -> HashAggregate (actual rows=10020 loops=1) + Group Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time", _hyper_1_1_chunk.dev_name, _hyper_1_1_chunk.val + Batches: 1 + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(11 rows) + +:PREFIX SELECT DISTINCT *, 'q1_9' FROM :TABLE ORDER BY dev; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=10020 loops=1) + Sort Key: _hyper_1_1_chunk.dev + Sort Method: quicksort + -> HashAggregate (actual rows=10020 loops=1) + Group Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time", _hyper_1_1_chunk.dev_name, _hyper_1_1_chunk.val + Batches: 1 + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT dev, time, 'q1_10' FROM :TABLE ORDER BY dev; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort (actual rows=10020 loops=1) + Sort Key: _hyper_1_1_chunk.dev + Sort Method: quicksort + -> HashAggregate (actual rows=10020 loops=1) + Group Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + Batches: 1 + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT dev, NULL, 'q1_11' FROM :TABLE ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual 
rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +-- distinct on expressions not supported +:PREFIX SELECT DISTINCT time_bucket(10,time), 'q1_12' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------- + HashAggregate (actual rows=100 loops=1) + Group Key: time_bucket(10, _hyper_1_1_chunk."time") + Batches: 1 + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(9 rows) + +:PREFIX SELECT DISTINCT length(dev_name), 'q1_13' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------- + HashAggregate (actual rows=3 loops=1) + Group Key: length(_hyper_1_1_chunk.dev_name) + Batches: 1 + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(9 rows) + +:PREFIX SELECT DISTINCT 3*time, 'q1_14' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------- + HashAggregate (actual rows=1000 loops=1) + Group Key: (3 * _hyper_1_1_chunk."time") + Batches: 1 + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(9 rows) + +:PREFIX SELECT DISTINCT 'Device ' || dev_name FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------- + HashAggregate (actual rows=11 loops=1) + Group Key: ('Device '::text || _hyper_1_1_chunk.dev_name) + Batches: 1 + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(9 rows) + +-- DISTINCT ON queries +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on 
_hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_2' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_3', NULL FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_4', length(md5(now()::text)) FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index 
Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, 'q2_5', length(md5(random()::text)) FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(11 rows) + +:PREFIX SELECT DISTINCT ON (dev) *, 'q2_7' FROM :TABLE; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) 
+ -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, time, 'q2_8' FROM :TABLE; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, NULL, 'q2_9' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) time, 'q2_10' FROM :TABLE ORDER by dev, time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using 
_hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, tableoid::regclass, 'q2_11' FROM :TABLE; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, int_func_immutable(), 'q2_12' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, int_func_stable(), 'q2_13' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result 
(actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev, int_func_volatile(), 'q2_14' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +-- DISTINCT ON queries on TEXT column +:PREFIX SELECT DISTINCT ON (dev_name) dev_name FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan 
(SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_2' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_3', NULL FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_4', length(md5(now()::text)) FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 
loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, 'q3_5', length(md5(random()::text)) FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) * FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(11 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) *, 'q3_7' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge 
Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, time, 'q3_8' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, NULL, 'q3_9' FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) time, 'q3_10' FROM :TABLE ORDER by dev_name, time; + QUERY PLAN +--------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Sort (actual rows=10020 
loops=1) + Sort Key: _hyper_1_1_chunk.dev_name, _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Result (actual rows=10020 loops=1) + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(10 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name, tableoid::regclass, 'q3_11' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev_name::varchar) dev_name::varchar FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev, int_func_immutable(), 'q3_13' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk 
(actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev, int_func_stable(), 'q3_14' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev, int_func_volatile(), 'q3_15' FROM :TABLE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +\qecho DISTINCT with wholerow var +DISTINCT with wholerow var +:PREFIX SELECT DISTINCT ON (dev) :TABLE FROM :TABLE; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 
loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(11 rows) + +-- should not use SkipScan since we only support SkipScan on single-column distinct +:PREFIX SELECT DISTINCT :TABLE FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate (actual rows=10020 loops=1) + Group Key: ((skip_scan_ht.*)::skip_scan_ht) + Batches: 1 + -> Append (actual rows=10020 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=2505 loops=1) +(8 rows) + +\qecho LIMIT queries on :TABLE +LIMIT queries on skip_scan_ht +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE LIMIT 3; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Merge Append (actual rows=9 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev DESC, time DESC LIMIT 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Merge Append (actual rows=9 loops=1) + Sort Key: _hyper_1_1_chunk.dev DESC, _hyper_1_1_chunk."time" DESC + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Only Scan Backward using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=3 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=3 loops=1) + -> Index Only Scan Backward using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=3 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=3 loops=1) + -> Index Only Scan Backward using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=3 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=3 loops=1) + -> Index Only Scan Backward 
using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=3 loops=1) + Index Cond: (dev < NULL::integer) + Heap Fetches: 3 +(20 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE ORDER BY dev, time LIMIT 3; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Unique (actual rows=3 loops=1) + -> Merge Append (actual rows=9 loops=1) + Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=3 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 3 +(20 rows) + +\qecho range queries on :TABLE +range queries on skip_scan_ht +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time BETWEEN 100 AND 300; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=22 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: ((dev > NULL::integer) AND ("time" >= 100) AND ("time" <= 300)) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: ((dev > NULL::integer) AND ("time" >= 100) AND ("time" <= 300)) + Heap Fetches: 11 +(11 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time < 200; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: ((dev > NULL::integer) AND ("time" < 200)) + Heap Fetches: 11 +(5 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE time > 800; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: ((dev > NULL::integer) AND ("time" > 800)) + Heap Fetches: 11 +(5 rows) + +\qecho 
ordered append on :TABLE +ordered append on skip_scan_ht +:PREFIX SELECT * FROM :TABLE ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on skip_scan_ht (actual rows=10020 loops=1) + Order: skip_scan_ht."time" + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_time_dev_idx on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_time_dev_idx on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_time_dev_idx on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_time_dev_idx on _hyper_1_4_chunk (actual rows=2505 loops=1) +(6 rows) + +:PREFIX SELECT DISTINCT ON (time) time FROM :TABLE WHERE time BETWEEN 0 AND 5000; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=1000 loops=1) + -> Merge Append (actual rows=1000 loops=1) + Sort Key: _hyper_1_1_chunk."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_time_dev_idx on _hyper_1_1_chunk (actual rows=250 loops=1) + Index Cond: (("time" > NULL::integer) AND ("time" >= 0) AND ("time" <= 5000)) + Heap Fetches: 250 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_time_dev_idx on _hyper_1_2_chunk (actual rows=250 loops=1) + Index Cond: (("time" > NULL::integer) AND ("time" >= 0) AND ("time" <= 5000)) + Heap Fetches: 250 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_time_dev_idx on _hyper_1_3_chunk (actual rows=250 loops=1) + Index Cond: (("time" > NULL::integer) AND ("time" >= 0) AND ("time" <= 5000)) + Heap Fetches: 250 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_time_dev_idx on _hyper_1_4_chunk (actual rows=250 loops=1) + Index Cond: (("time" > NULL::integer) AND ("time" >= 0) AND ("time" <= 5000)) + Heap Fetches: 250 +(19 rows) + +\qecho SUBSELECTS on :TABLE +SUBSELECTS on skip_scan_ht +:PREFIX SELECT time, dev, val, 'q4_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(13 rows) + +:PREFIX SELECT NULL, dev, NULL, 
'q4_3' FROM (SELECT DISTINCT ON (dev) dev FROM :TABLE) a; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(20 rows) + +:PREFIX SELECT time, dev, NULL, 'q4_4' FROM (SELECT DISTINCT ON (dev) dev, time FROM :TABLE) a; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(12 rows) + +\qecho ORDER BY +ORDER BY +:PREFIX SELECT time, dev, val, 'q5_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE ORDER BY dev, time) a; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk 
(actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(13 rows) + +:PREFIX SELECT time, dev, val, 'q5_2' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE ORDER BY dev DESC, time DESC) a; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev DESC, _hyper_1_1_chunk."time" DESC + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan Backward using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan Backward using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(13 rows) + +\qecho WHERE CLAUSES +WHERE CLAUSES +:PREFIX SELECT time, dev, val, 'q6_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > 5) a; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=5 loops=1) + -> Result (actual rows=5 loops=1) + -> Unique (actual rows=5 loops=1) + -> Merge Append (actual rows=20 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) +(17 rows) + +:PREFIX SELECT time, dev, val, 'q6_2' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE time > 5) a; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: ("time" > 5) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual 
rows=11 loops=1) + Index Cond: ("time" > 5) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: ("time" > 5) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: ("time" > 5) +(17 rows) + +:PREFIX SELECT time, dev, val, 'q6_3' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a WHERE dev > 5; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=5 loops=1) + -> Result (actual rows=5 loops=1) + -> Unique (actual rows=5 loops=1) + -> Merge Append (actual rows=20 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) +(17 rows) + +:PREFIX SELECT time, dev, val, 'q6_4' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a WHERE time > 5; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=7 loops=1) + Filter: (a."time" > 5) + Rows Removed by Filter: 4 + -> Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(15 rows) + +--\qecho immutable func in WHERE clause on :TABLE +:PREFIX SELECT DISTINCT ON (dev) *, 'q6_5' FROM :TABLE WHERE dev > int_func_immutable(); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=9 loops=1) + -> Unique (actual rows=9 loops=1) + -> Merge Append (actual rows=36 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=9 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=9 loops=1) + 
Index Cond: (dev > 1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=9 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=9 loops=1) + Index Cond: (dev > 1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=9 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=9 loops=1) + Index Cond: (dev > 1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=9 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=9 loops=1) + Index Cond: (dev > 1) +(16 rows) + +--\qecho stable func in WHERE clause on :TABLE +:PREFIX SELECT DISTINCT ON (dev) *, 'q6_6' FROM :TABLE WHERE dev > int_func_stable(); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=8 loops=1) + -> Unique (actual rows=8 loops=1) + -> Custom Scan (ConstraintAwareAppend) (actual rows=32 loops=1) + Hypertable: skip_scan_ht + Chunks excluded during startup: 0 + -> Merge Append (actual rows=32 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=8 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=8 loops=1) + Index Cond: (dev > int_func_stable()) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=8 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=8 loops=1) + Index Cond: (dev > int_func_stable()) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=8 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=8 loops=1) + Index Cond: (dev > int_func_stable()) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=8 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=8 loops=1) + Index Cond: (dev > int_func_stable()) +(19 rows) + +--\qecho volatile func in WHERE clause on :TABLE +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > int_func_volatile(); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=7 loops=1) + -> Custom Scan (ConstraintAwareAppend) (actual rows=28 loops=1) + Hypertable: skip_scan_ht + Chunks excluded during startup: 0 + -> Merge Append (actual rows=28 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=7 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=7 loops=1) + Filter: (dev > int_func_volatile()) + Rows Removed by Filter: 755 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=7 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=7 loops=1) + Filter: (dev > int_func_volatile()) + Rows Removed by Filter: 755 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=7 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=7 loops=1) + Filter: (dev > int_func_volatile()) + Rows Removed by Filter: 755 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=7 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=7 loops=1) + Filter: (dev > int_func_volatile()) + Rows Removed by Filter: 755 +(22 
rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = ANY(inta_func_immutable()); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=3 loops=1) + -> Merge Append (actual rows=12 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY ('{1,2,3}'::integer[])) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY ('{1,2,3}'::integer[])) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY ('{1,2,3}'::integer[])) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY ('{1,2,3}'::integer[])) +(15 rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = ANY(inta_func_stable()); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=3 loops=1) + -> Custom Scan (ConstraintAwareAppend) (actual rows=12 loops=1) + Hypertable: skip_scan_ht + Chunks excluded during startup: 0 + -> Merge Append (actual rows=12 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY (inta_func_stable())) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY (inta_func_stable())) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY (inta_func_stable())) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=3 loops=1) + Index Cond: (dev = ANY (inta_func_stable())) +(18 rows) + +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = ANY(inta_func_volatile()); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=3 loops=1) + -> Custom Scan (ConstraintAwareAppend) (actual rows=12 loops=1) + Hypertable: skip_scan_ht + Chunks excluded during startup: 0 + -> Merge Append (actual rows=12 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=3 loops=1) + Filter: (dev = ANY (inta_func_volatile())) + Rows Removed by Filter: 1755 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=3 loops=1) + Filter: (dev = ANY (inta_func_volatile())) 
+ Rows Removed by Filter: 1755 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=3 loops=1) + Filter: (dev = ANY (inta_func_volatile())) + Rows Removed by Filter: 1755 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=3 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=3 loops=1) + Filter: (dev = ANY (inta_func_volatile())) + Rows Removed by Filter: 1755 +(22 rows) + +-- RowCompareExpr +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE (dev, time) > (5,100); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=6 loops=1) + -> Merge Append (actual rows=24 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=6 loops=1) + Index Cond: (ROW(dev, "time") > ROW(5, 100)) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=6 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=6 loops=1) + Index Cond: (ROW(dev, "time") > ROW(5, 100)) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=6 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=6 loops=1) + Index Cond: (ROW(dev, "time") > ROW(5, 100)) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=6 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=6 loops=1) + Index Cond: (ROW(dev, "time") > ROW(5, 100)) +(15 rows) + +-- always false expr similar to our initial skip qual +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > NULL; + QUERY PLAN +---------------------------------------------- + Unique (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: dev + Sort Method: quicksort + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(6 rows) + +-- no tuples matching +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev > 20; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=0 loops=1) + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=0 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=0 loops=1) + Index Cond: (dev > 20) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=0 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Index Cond: (dev > 20) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=0 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=0 loops=1) + Index Cond: (dev > 20) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=0 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=0 loops=1) + Index Cond: (dev > 20) +(15 rows) + +-- multiple constraints in WHERE clause +:PREFIX SELECT DISTINCT ON (dev) dev,time FROM :TABLE WHERE dev > 5 AND time = 100; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=5 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_time_dev_val_idx on _hyper_1_1_chunk (actual rows=5 loops=1) + Index Cond: (("time" = 100) AND (dev > 5)) + Heap Fetches: 5 +(4 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev,time FROM :TABLE WHERE dev > 5 AND time > 200; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=5 loops=1) + -> Merge Append (actual rows=20 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=5 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=5 loops=1) + Index Cond: ((dev > NULL::integer) AND (dev > 5) AND ("time" > 200)) + Heap Fetches: 5 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + Filter: ("time" > 200) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + Filter: ("time" > 200) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=5 loops=1) + Index Cond: (dev > 5) + Filter: ("time" > 200) +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev,time FROM :TABLE WHERE dev >= 5 AND dev < 7 AND dev >= 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=2 loops=1) + -> Merge Append (actual rows=8 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=2 loops=1) + Index Cond: ((dev >= 5) AND (dev < 7) AND (dev >= 2)) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=2 loops=1) + Index Cond: ((dev >= 5) AND (dev < 7) AND (dev >= 2)) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=2 loops=1) + Index Cond: ((dev >= 5) AND (dev < 7) AND (dev >= 2)) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=2 loops=1) + Index Cond: ((dev >= 5) AND (dev < 7) AND (dev >= 2)) +(15 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev,time,val FROM :TABLE WHERE time > 100 AND time < 200 AND val > 10 AND val < 10000 AND dev > 2 AND dev < 7 ORDER BY dev,time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=0 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=0 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=0 loops=1) + Index Cond: ((dev > 2) AND (dev < 7) AND ("time" > 100) AND ("time" < 200)) + Filter: ((val > 10) AND (val < 10000)) 
+ Rows Removed by Filter: 396 +(6 rows) + +:PREFIX SELECT DISTINCT ON (dev) dev FROM :TABLE WHERE dev IS NULL; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=1 loops=1) + -> Merge Append (actual rows=4 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ((dev > NULL::integer) AND (dev IS NULL)) + Heap Fetches: 1 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: ((dev > NULL::integer) AND (dev IS NULL)) + Heap Fetches: 1 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ((dev > NULL::integer) AND (dev IS NULL)) + Heap Fetches: 1 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + Index Cond: ((dev > NULL::integer) AND (dev IS NULL)) + Heap Fetches: 1 +(19 rows) + +:PREFIX SELECT DISTINCT ON (dev_name) dev_name FROM :TABLE WHERE dev_name IS NULL; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=1 loops=1) + -> Merge Append (actual rows=4 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ((dev_name > NULL::text) AND (dev_name IS NULL)) + Heap Fetches: 1 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: ((dev_name > NULL::text) AND (dev_name IS NULL)) + Heap Fetches: 1 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ((dev_name > NULL::text) AND (dev_name IS NULL)) + Heap Fetches: 1 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + Index Cond: ((dev_name > NULL::text) AND (dev_name IS NULL)) + Heap Fetches: 1 +(19 rows) + +-- test constants in ORDER BY +:PREFIX SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev = 1 ORDER BY dev, time DESC; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Index Scan Backward using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: (dev = 1) + -> Index Scan Backward using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: (dev = 1) + -> Index Scan Backward using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on 
_hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: (dev = 1) + -> Index Scan Backward using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + Index Cond: (dev = 1) +(11 rows) + +-- CTE +:PREFIX WITH devices AS ( + SELECT DISTINCT ON (dev) dev FROM :TABLE +) +SELECT * FROM devices; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +:PREFIX WITH devices AS ( + SELECT DISTINCT dev FROM :TABLE +) +SELECT * FROM devices ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +-- prepared statements +PREPARE prep AS SELECT DISTINCT ON (dev_name) dev_name FROM :TABLE; +:PREFIX EXECUTE prep; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> 
Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(19 rows) + +:PREFIX EXECUTE prep; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(19 rows) + +:PREFIX EXECUTE prep; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev_name > NULL::text) + Heap Fetches: 11 +(19 rows) + +DEALLOCATE prep; +-- ReScan tests +:PREFIX SELECT time, dev, val, 'q7_1' FROM (SELECT DISTINCT ON (dev) * FROM ( + VALUES (1), (2)) a(v), + LATERAL (SELECT * FROM :TABLE WHERE time != a.v) b) a; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------- + Subquery Scan on a (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Nested Loop (actual rows=20020 loops=1) + Join Filter: (_hyper_1_1_chunk."time" <> "*VALUES*".column1) + Rows Removed by Join Filter: 20 + -> Merge Append (actual rows=10020 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=2505 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=2505 loops=1) + -> Materialize (actual rows=2 loops=10020) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) +(13 rows) + +:PREFIX SELECT time, dev, val, 'q7_2' FROM (SELECT * FROM ( + VALUES (1), (2)) a(v), + LATERAL (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev != a.v) b) a; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=18 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Result (actual rows=9 loops=2) + -> Unique (actual rows=9 loops=2) + -> Merge Append (actual rows=36 loops=2) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=9 loops=2) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=9 loops=2) + Filter: (dev <> "*VALUES*".column1) + Rows Removed by Filter: 255 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=9 loops=2) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=9 loops=2) + Filter: (dev <> "*VALUES*".column1) + Rows Removed by Filter: 255 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=9 loops=2) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=9 loops=2) + Filter: (dev <> "*VALUES*".column1) + Rows Removed by Filter: 255 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=9 loops=2) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=9 loops=2) + Filter: (dev <> "*VALUES*".column1) + Rows Removed by Filter: 255 +(22 rows) + +-- RuntimeKeys +:PREFIX SELECT time, dev, val, 'q8_1' FROM (SELECT * FROM ( + VALUES (1), (2)) a(v), + LATERAL (SELECT DISTINCT ON (dev) * FROM :TABLE WHERE dev >= a.v) b) c; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Result (actual rows=10 loops=2) + -> Unique (actual rows=10 loops=2) + -> Merge Append (actual rows=38 loops=2) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=10 loops=2) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=10 loops=2) + Index Cond: (dev >= "*VALUES*".column1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=10 loops=2) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=10 loops=2) + Index Cond: (dev >= "*VALUES*".column1) + -> Custom Scan (SkipScan) on 
_hyper_1_3_chunk (actual rows=10 loops=2) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=10 loops=2) + Index Cond: (dev >= "*VALUES*".column1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=10 loops=2) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=10 loops=2) + Index Cond: (dev >= "*VALUES*".column1) +(18 rows) + +-- Emulate multi-column DISTINCT using multiple SkipSkans +:PREFIX SELECT time, dev, val, 'q9_1' FROM (SELECT b.* FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) * FROM :TABLE WHERE dev = a.dev) b) c; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=10000 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Result (actual rows=909 loops=11) + -> Unique (actual rows=909 loops=11) + -> Merge Append (actual rows=909 loops=11) + Sort Key: _hyper_1_1_chunk_1."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=227 loops=11) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=227 loops=11) + Index Cond: (dev = _hyper_1_1_chunk.dev) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=227 loops=11) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=227 loops=11) + Index Cond: (dev = _hyper_1_1_chunk.dev) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=227 loops=11) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=227 loops=11) + Index Cond: (dev = _hyper_1_1_chunk.dev) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=227 loops=11) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=227 loops=11) + Index Cond: (dev = _hyper_1_1_chunk.dev) +(36 rows) + +:PREFIX SELECT time, dev, NULL, 'q9_2' FROM (SELECT b.* FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) dev, time FROM :TABLE WHERE dev = a.dev) b) c; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop (actual rows=10000 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Unique (actual rows=909 loops=11) + -> Merge Append (actual rows=909 loops=11) + Sort Key: _hyper_1_1_chunk_1."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 +(39 rows) + +-- Test that the multi-column DISTINCT emulation is equivalent to a real multi-column DISTINCT +:PREFIX SELECT * FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) dev, time FROM :TABLE WHERE dev = a.dev) b; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop (actual rows=10000 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on 
_hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Unique (actual rows=909 loops=11) + -> Merge Append (actual rows=909 loops=11) + Sort Key: _hyper_1_1_chunk_1."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 +(39 rows) + +:PREFIX SELECT DISTINCT ON (dev, time) dev, time FROM :TABLE WHERE dev IS NOT NULL; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=10000 loops=1) + -> Merge Append (actual rows=10000 loops=1) + Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 +(15 rows) + +:PREFIX SELECT DISTINCT ON (dev, time) dev, time FROM :TABLE WHERE dev IS NOT NULL +UNION SELECT b.* FROM + (SELECT DISTINCT ON (dev) dev FROM :TABLE) a, + LATERAL (SELECT DISTINCT ON (time) dev, time FROM :TABLE WHERE dev = a.dev) b; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=10000 loops=1) + -> Sort (actual rows=20000 loops=1) + Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Append (actual rows=20000 loops=1) + -> Unique (actual rows=10000 loops=1) + -> Merge Append (actual rows=10000 loops=1) + Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Nested Loop (actual rows=10000 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk_1.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Unique (actual rows=909 loops=11) + -> Merge Append (actual rows=909 loops=11) + Sort Key: _hyper_1_1_chunk_2."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk _hyper_1_1_chunk_2 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_2 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk_1.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk _hyper_1_2_chunk_2 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_2 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk_1.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk _hyper_1_3_chunk_2 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_2 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk_1.dev) AND ("time" > NULL::integer)) + Heap 
Fetches: 2500 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk _hyper_1_4_chunk_2 (actual rows=227 loops=11) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_2 (actual rows=227 loops=11) + Index Cond: ((dev = _hyper_1_1_chunk_1.dev) AND ("time" > NULL::integer)) + Heap Fetches: 2500 +(59 rows) + +-- SkipScan into INSERT +:PREFIX INSERT INTO skip_scan_insert(time, dev, val, query) SELECT time, dev, val, 'q10_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Insert on skip_scan_insert (actual rows=0 loops=1) + -> Subquery Scan on a (actual rows=11 loops=1) + -> Result (actual rows=11 loops=1) + -> Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) +(14 rows) + +-- parallel query +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false); + set_config +------------ + on +(1 row) + +:PREFIX SELECT DISTINCT dev FROM :TABLE ORDER BY dev; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx on _hyper_1_1_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx on _hyper_1_3_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx on _hyper_1_4_chunk (actual rows=11 loops=1) + Index Cond: (dev > NULL::integer) + Heap Fetches: 11 +(19 rows) + +SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'off', false); + set_config +------------ + off +(1 row) + +TRUNCATE skip_scan_insert; +-- table with only nulls +:PREFIX SELECT DISTINCT ON (time) time FROM skip_scan_nulls; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------- + Unique (actual rows=1 loops=1) + -> Custom Scan (SkipScan) on skip_scan_nulls (actual rows=1 loops=1) + -> Index Only Scan using skip_scan_nulls_time_idx on skip_scan_nulls (actual rows=1 loops=1) + Index Cond: ("time" > NULL::integer) + Heap Fetches: 1 +(5 rows) + +-- no tuples in resultset +:PREFIX SELECT DISTINCT ON (time) time FROM skip_scan_nulls WHERE time IS NOT NULL; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Unique (actual rows=0 loops=1) + -> Custom Scan (SkipScan) on skip_scan_nulls (actual rows=0 loops=1) + -> Index Only Scan using skip_scan_nulls_time_idx on skip_scan_nulls (actual rows=0 loops=1) + Index Cond: (("time" > NULL::integer) AND ("time" IS NOT NULL)) + Heap Fetches: 0 +(5 rows) + +\ir include/skip_scan_query_ht.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE INDEX ON :TABLE(dev); +CREATE INDEX ON :TABLE(time); +-- SkipScan with ordered append +:PREFIX SELECT DISTINCT ON (time) time FROM :TABLE ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=1000 loops=1) + -> Custom Scan (ChunkAppend) on skip_scan_ht (actual rows=1000 loops=1) + Order: skip_scan_ht."time" + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_time_idx on _hyper_1_1_chunk (actual rows=250 loops=1) + Index Cond: ("time" > NULL::integer) + Heap Fetches: 250 + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_time_idx on _hyper_1_2_chunk (actual rows=250 loops=1) + Index Cond: ("time" > NULL::integer) + Heap Fetches: 250 + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_time_idx on _hyper_1_3_chunk (actual rows=250 loops=1) + Index Cond: ("time" > NULL::integer) + Heap Fetches: 250 + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=250 loops=1) + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_time_idx on _hyper_1_4_chunk (actual rows=250 loops=1) + Index Cond: ("time" > NULL::integer) + Heap Fetches: 250 +(19 rows) + +--baseline query with skipscan +:PREFIX SELECT DISTINCT ON (dev) dev, dev_name FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx1 on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx1 on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx1 on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using 
_hyper_1_4_chunk_skip_scan_ht_dev_idx1 on _hyper_1_4_chunk (actual rows=11 loops=1) +(11 rows) + +-- compression doesnt prevent skipscan +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +:PREFIX SELECT DISTINCT ON (dev) dev, dev_name FROM :TABLE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=2538 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=2505 loops=1) + -> Index Scan using compress_hyper_2_5_chunk__compressed_hypertable_2_dev__ts_meta_ on compress_hyper_2_5_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx1 on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx1 on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx1 on _hyper_1_4_chunk (actual rows=11 loops=1) +(11 rows) + +SELECT decompress_chunk('_timescaledb_internal._hyper_1_1_chunk'); + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +--baseline query with skipscan +:PREFIX SELECT DISTINCT ON (dev) dev, dev_name FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx1 on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx1 on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_3_chunk_skip_scan_ht_dev_idx1 on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx1 on _hyper_1_4_chunk (actual rows=11 loops=1) +(11 rows) + +-- partial indexes don't prevent skipscan +DROP INDEX _timescaledb_internal._hyper_1_1_chunk_skip_scan_ht_dev_idx; +:PREFIX SELECT DISTINCT ON (dev) dev, dev_name FROM :TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=11 loops=1) + -> Merge Append (actual rows=44 loops=1) + Sort Key: _hyper_1_1_chunk.dev + -> Custom Scan (SkipScan) on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_1_chunk_skip_scan_ht_dev_idx1 on _hyper_1_1_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_2_chunk_skip_scan_ht_dev_idx1 on _hyper_1_2_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Index Scan using 
_hyper_1_3_chunk_skip_scan_ht_dev_idx1 on _hyper_1_3_chunk (actual rows=11 loops=1) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk (actual rows=11 loops=1) + -> Index Scan using _hyper_1_4_chunk_skip_scan_ht_dev_idx1 on _hyper_1_4_chunk (actual rows=11 loops=1) +(11 rows) + +-- IndexPath without pathkeys doesnt use SkipScan +EXPLAIN (costs off, timing off, summary off) SELECT DISTINCT 1 FROM pg_rewrite; + QUERY PLAN +------------------------------------------------------------------------- + Limit + -> Index Only Scan using pg_rewrite_rel_rulename_index on pg_rewrite +(2 rows) + +-- try one query with EXPLAIN only for coverage +EXPLAIN (costs off, timing off, summary off) SELECT DISTINCT ON (dev_name) dev_name FROM skip_scan; + QUERY PLAN +----------------------------------------------------------------------- + Unique + -> Custom Scan (SkipScan) on skip_scan + -> Index Only Scan using skip_scan_dev_name_idx on skip_scan + Index Cond: (dev_name > NULL::text) +(4 rows) + +EXPLAIN (costs off, timing off, summary off) SELECT DISTINCT ON (dev_name) dev_name FROM skip_scan_ht; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Unique + -> Merge Append + Sort Key: _hyper_1_1_chunk.dev_name + -> Custom Scan (SkipScan) on _hyper_1_1_chunk + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_name_idx on _hyper_1_1_chunk + Index Cond: (dev_name > NULL::text) + -> Custom Scan (SkipScan) on _hyper_1_2_chunk + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_name_idx on _hyper_1_2_chunk + Index Cond: (dev_name > NULL::text) + -> Custom Scan (SkipScan) on _hyper_1_3_chunk + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_name_idx on _hyper_1_3_chunk + Index Cond: (dev_name > NULL::text) + -> Custom Scan (SkipScan) on _hyper_1_4_chunk + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_name_idx on _hyper_1_4_chunk + Index Cond: (dev_name > NULL::text) +(15 rows) + +-- #3629 skipscan with constant skipscan column in where clause +CREATE TABLE i3629(a int, time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('i3629', 'time'); + table_name +------------ + i3629 +(1 row) + +INSERT INTO i3629 SELECT i, '2020-04-01'::date-10-i from generate_series(1,20) i; +EXPLAIN (SUMMARY OFF, COSTS OFF) SELECT DISTINCT ON (a) * FROM i3629 WHERE a in (2) ORDER BY a ASC, time DESC; + QUERY PLAN +---------------------------------------------------------------------------------- + Limit + -> Merge Append + Sort Key: _hyper_3_6_chunk."time" DESC + -> Index Scan using _hyper_3_6_chunk_i3629_time_idx on _hyper_3_6_chunk + Filter: (a = 2) + -> Index Scan using _hyper_3_7_chunk_i3629_time_idx on _hyper_3_7_chunk + Filter: (a = 2) + -> Index Scan using _hyper_3_8_chunk_i3629_time_idx on _hyper_3_8_chunk + Filter: (a = 2) + -> Index Scan using _hyper_3_9_chunk_i3629_time_idx on _hyper_3_9_chunk + Filter: (a = 2) +(11 rows) + +SELECT DISTINCT ON (a) * FROM i3629 WHERE a in (2) ORDER BY a ASC, time DESC; + a | time +---+------------------------------ + 2 | Fri Mar 20 00:00:00 2020 PDT +(1 row) + +-- #3720 skipscan not being used on varchar column +CREATE TABLE i3720(time timestamptz not null,data varchar); +SELECT table_name FROM create_hypertable('i3720','time'); +WARNING: column type "character varying" used for "data" does not follow best practices + table_name +------------ + i3720 +(1 row) + +INSERT INTO i3720 +SELECT time, (array['Yes', 'No', 'Maybe'])[floor(random() * 3 + 1)] +FROM 
generate_series('2000-01-01'::timestamptz,'2000-01-03'::timestamptz, '10 minute'::interval) AS g1(time); +CREATE INDEX ON i3720(data, time); +ANALYZE i3720; +:PREFIX SELECT DISTINCT ON(data) * FROM i3720; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=3 loops=1) + -> Custom Scan (SkipScan) on _hyper_4_10_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_4_10_chunk_i3720_data_time_idx on _hyper_4_10_chunk (actual rows=3 loops=1) + Index Cond: (data > NULL::text) + Heap Fetches: 3 +(5 rows) + diff --git a/tsl/test/expected/telemetry_stats-16.out b/tsl/test/expected/telemetry_stats-16.out new file mode 100644 index 00000000000..cf2aa3db3df --- /dev/null +++ b/tsl/test/expected/telemetry_stats-16.out @@ -0,0 +1,1197 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. +CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is 
already up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Insert data 
+INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0 + + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + 
"num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 32768, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 180224, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 40960, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + 
"num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Add distributed hypertables +\set DN_DBNAME_1 :TEST_DBNAME _1 +\set DN_DBNAME_2 :TEST_DBNAME _2 +-- Not an access node or data node +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + | "none" +(1 row) + +-- Become an access node by adding a data node +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_1 | db_telemetry_stats_1 | t | t | t +(1 row) + +-- Telemetry should show one data node and "acces node" status +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + 1 | "access node" +(1 row) + +-- See telemetry report from a data node +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT test.remote_exec(NULL, $$ + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +num_data_nodes|distributed_member +--------------+------------------ + |"data node" +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_2 | db_telemetry_stats_2 | t | t | t +(1 row) + +CREATE TABLE disthyper (LIKE normal); +SELECT create_distributed_hypertable('disthyper', 'time', 'device'); + create_distributed_hypertable +------------------------------- + (6,public,disthyper,t) +(1 row) + +-- Show distributed hypertables stats with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- No datanode-related stats on the access node +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn +FROM relations; + distributed_hypertables_dn +----------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0,+ + "num_compressed_hypertables": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + } +(1 row) + +-- Insert data into the distributed hypertable +INSERT INTO disthyper +SELECT * FROM normal; +-- Update telemetry stats and show output on access node and data +-- nodes. Note that the access node doesn't store data so shows +-- zero. It should have stats from ANALYZE, though, like +-- num_reltuples. 
+ANALYZE disthyper; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 697, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + "num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 368 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + "num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + 
"num_reltuples": 329 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Add compression +ALTER TABLE disthyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------------- + _timescaledb_internal._dist_hyper_6_19_chunk + _timescaledb_internal._dist_hyper_6_20_chunk + _timescaledb_internal._dist_hyper_6_21_chunk + _timescaledb_internal._dist_hyper_6_22_chunk +(4 rows) + +ANALYZE disthyper; +-- Update telemetry stats and show updated compression stats +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 581, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 56, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 312 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 60, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 269 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Create a replicated distributed hypertable and show replication stats +CREATE TABLE disthyper_repl (LIKE normal); +SELECT create_distributed_hypertable('disthyper_repl', 'time', 'device', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (7,public,disthyper_repl,t) +(1 row) + +INSERT INTO disthyper_repl +SELECT * FROM normal; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 36, + + "num_relations": 2, + + "num_reltuples": 581, + + "num_replica_chunks": 18, + + "num_replicated_distributed_hypertables": 1+ + } +(1 row) + +-- Create a continuous aggregate on the distributed hypertable +CREATE MATERIALIZED VIEW distcontagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg" +CREATE MATERIALIZED VIEW distcontagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg_old" +VACUUM; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates +FROM relations; + continuous_aggregates +------------------------------------------------ + { + + "heap_size": 425984, + + "toast_size": 40960, + + "compression": { + + "compressed_heap_size": 40960, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 
16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 409600, + + "num_children": 8, + + "num_relations": 4, + + "num_reltuples": 2336, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 2, + + "num_caggs_on_distributed_hypertables": 2,+ + "num_caggs_using_real_time_aggregation": 3+ + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? +---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', 
interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 
00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (10,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +DROP DATABASE :DN_DBNAME_1 WITH (FORCE); +DROP DATABASE :DN_DBNAME_2 WITH (FORCE); diff --git a/tsl/test/expected/transparent_decompression-16.out b/tsl/test/expected/transparent_decompression-16.out new file mode 100644 index 00000000000..a7977d88063 --- /dev/null +++ b/tsl/test/expected/transparent_decompression-16.out @@ -0,0 +1,10170 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\set TEST_BASE_NAME transparent_decompression +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME", + format('%s/results/%s_results_uncompressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_UNCOMPRESSED", + format('%s/results/%s_results_compressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_COMPRESSED" \gset +SELECT format('\! diff %s %s', :'TEST_RESULTS_UNCOMPRESSED', :'TEST_RESULTS_COMPRESSED') AS "DIFF_CMD" \gset +SET work_mem TO '50MB'; +-- disable memoize node to make EXPLAIN output comparable between PG14 and previous versions +SELECT CASE WHEN current_setting('server_version_num')::int/10000 >= 14 THEN set_config('enable_memoize','off',false) ELSE 'off' END AS enable_memoize; + enable_memoize +---------------- + off +(1 row) + +CREATE TABLE metrics ( + filler_1 int, + filler_2 int, + filler_3 int, + time timestamptz NOT NULL, + device_id int, + device_id_peer int, + v0 int, + v1 int, + v2 float, + v3 float +); +SELECT create_hypertable ('metrics', 'time'); + create_hypertable +---------------------- + (1,public,metrics,t) +(1 row) + +ALTER TABLE metrics + DROP COLUMN filler_1; +INSERT INTO metrics (time, device_id, device_id_peer, v0, v1, v2, v3) +SELECT time, + device_id, + 0, + device_id + 1, + device_id + 2, + device_id + 0.5, + NULL +FROM generate_series('2000-01-01 0:00:00+0'::timestamptz, '2000-01-05 23:55:00+0', '20m') gtime (time), + generate_series(1, 5, 1) gdevice (device_id); +ALTER TABLE metrics + DROP COLUMN filler_2; +INSERT INTO metrics (time, device_id, device_id_peer, v0, v1, v2, v3) +SELECT time, + device_id, + 0, + device_id - 1, + device_id + 2, + device_id + 0.5, + NULL +FROM generate_series('2000-01-06 0:00:00+0'::timestamptz, '2000-01-12 23:55:00+0', '20m') gtime (time), + generate_series(1, 5, 1) gdevice (device_id); +ALTER TABLE metrics + DROP COLUMN filler_3; +INSERT INTO metrics (time, device_id, device_id_peer, v0, v1, v2, v3) +SELECT time, + device_id, + 0, + device_id, + device_id + 2, + device_id + 0.5, + NULL +FROM generate_series('2000-01-13 0:00:00+0'::timestamptz, '2000-01-19 23:55:00+0', '20m') gtime (time), + generate_series(1, 5, 1) gdevice (device_id); +ANALYZE metrics; +-- create identical hypertable with space partitioning +CREATE TABLE metrics_space ( + filler_1 int, + filler_2 int, + filler_3 int, + time timestamptz NOT NULL, + device_id int, + device_id_peer int, + v0 int, + v1 float, + v2 float, + v3 float +); +SELECT create_hypertable ('metrics_space', 'time', 'device_id', 3); + create_hypertable +---------------------------- + (2,public,metrics_space,t) +(1 row) + +ALTER TABLE metrics_space + DROP COLUMN filler_1; +INSERT INTO metrics_space (time, device_id, device_id_peer, v0, v1, v2, v3) +SELECT time, + device_id, + 0, + device_id + 1, + device_id + 2, + device_id + 0.5, + NULL +FROM generate_series('2000-01-01 0:00:00+0'::timestamptz, '2000-01-05 23:55:00+0', '20m') gtime (time), + generate_series(1, 5, 1) gdevice (device_id); +ALTER TABLE metrics_space + DROP COLUMN filler_2; +INSERT INTO metrics_space (time, device_id, device_id_peer, v0, v1, v2, v3) +SELECT time, + device_id, + 0, + device_id + 1, + device_id + 2, + device_id + 0.5, + NULL +FROM generate_series('2000-01-06 0:00:00+0'::timestamptz, '2000-01-12 23:55:00+0', '20m') gtime (time), + generate_series(1, 5, 1) gdevice (device_id); +ALTER TABLE metrics_space + DROP COLUMN filler_3; +INSERT INTO metrics_space (time, device_id, device_id_peer, 
v0, v1, v2, v3) +SELECT time, + device_id, + 0, + device_id + 1, + device_id + 2, + device_id + 0.5, + NULL +FROM generate_series('2000-01-13 0:00:00+0'::timestamptz, '2000-01-19 23:55:00+0', '20m') gtime (time), + generate_series(1, 5, 1) gdevice (device_id); +ANALYZE metrics_space; +-- run queries on uncompressed hypertable and store result +\set PREFIX '' +\set PREFIX_VERBOSE '' +\set PREFIX_NO_ANALYZE '' +\set ECHO none +-- compress first and last chunk on the hypertable +ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby = 'v0, v1 desc, time', timescaledb.compress_segmentby = 'device_id,device_id_peer'); +SELECT compress_chunk ('_timescaledb_internal._hyper_1_1_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk ('_timescaledb_internal._hyper_1_3_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_3_chunk +(1 row) + +-- compress some chunks on space partitioned hypertable +-- we compress all chunks of first time slice, none of second, and 2 of the last time slice +ALTER TABLE metrics_space SET (timescaledb.compress, timescaledb.compress_orderby = 'v0, v1 desc, time', timescaledb.compress_segmentby = 'device_id,device_id_peer'); +SELECT compress_chunk ('_timescaledb_internal._hyper_2_4_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_2_4_chunk +(1 row) + +SELECT compress_chunk ('_timescaledb_internal._hyper_2_5_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_2_5_chunk +(1 row) + +SELECT compress_chunk ('_timescaledb_internal._hyper_2_6_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_2_6_chunk +(1 row) + +SELECT compress_chunk ('_timescaledb_internal._hyper_2_10_chunk'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +SELECT compress_chunk ('_timescaledb_internal._hyper_2_11_chunk'); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_11_chunk +(1 row) + +SELECT ht.schema_name || '.' || ht.table_name AS "METRICS_COMPRESSED" +FROM _timescaledb_catalog.hypertable ht + INNER JOIN _timescaledb_catalog.hypertable ht2 ON ht.id = ht2.compressed_hypertable_id + AND ht2.table_name = 'metrics' \gset +SELECT ht.schema_name || '.' || ht.table_name AS "METRICS_SPACE_COMPRESSED" +FROM _timescaledb_catalog.hypertable ht + INNER JOIN _timescaledb_catalog.hypertable ht2 ON ht.id = ht2.compressed_hypertable_id + AND ht2.table_name = 'metrics_space' \gset +\c :TEST_DBNAME :ROLE_SUPERUSER +-- Index created using query saved in variable used because there was +-- no standard way to create an index on a compressed table. +-- Once a standard way exists, modify this test to use that method. 
+CREATE INDEX c_index ON :METRICS_COMPRESSED (device_id); +CREATE INDEX c_space_index ON :METRICS_SPACE_COMPRESSED (device_id); +CREATE INDEX c_index_2 ON :METRICS_COMPRESSED (device_id, _ts_meta_count); +CREATE INDEX c_space_index_2 ON :METRICS_SPACE_COMPRESSED (device_id, _ts_meta_count); +CREATE INDEX ON :METRICS_COMPRESSED (device_id_peer); +CREATE INDEX ON :METRICS_SPACE_COMPRESSED (device_id_peer); +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE INDEX ON metrics_space (device_id, device_id_peer, v0, v1 DESC, time); +CREATE INDEX ON metrics_space (device_id, device_id_peer DESC, v0, v1 DESC, time); +CREATE INDEX ON metrics_space (device_id DESC, device_id_peer DESC, v0, v1 DESC, time); +ANALYZE metrics_space; +-- run queries on compressed hypertable and store result +\set PREFIX '' +\set PREFIX_VERBOSE '' +\set PREFIX_NO_ANALYZE '' +\set ECHO none +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_VERBOSE 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)' +\set PREFIX_NO_ANALYZE 'EXPLAIN (verbose, costs off)' +-- we disable parallelism here otherwise EXPLAIN ANALYZE output +-- will be not stable and differ depending on worker assignment +SET max_parallel_workers_per_gather TO 0; +-- get explain for queries on hypertable with compression +\set TEST_TABLE 'metrics' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- this should use DecompressChunk node +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time +LIMIT 5; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + -> Custom Scan (ChunkAppend) on public.metrics (actual rows=5 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + Order: metrics."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Sort (actual rows=5 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Sort Key: _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, 
compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_15_chunk.device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk (never executed) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk.device_id = 1) + -> Sort (never executed) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (never executed) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (never executed) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk.device_id = 1) +(30 rows) + +-- test RECORD by itself +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=1368 loops=1) + Order: metrics."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 2016 + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 +(19 rows) + +-- test expressions +:PREFIX +SELECT time_bucket ('1d', time), + v1 + v2 AS "sum", + COALESCE(NULL, v1, v2) AS "coalesce", + NULL AS "NULL", + 'text' AS "text", + :TEST_TABLE AS "RECORD" +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time, + device_id; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------ + Incremental Sort (actual rows=2736 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 86 Sort Method: quicksort + -> Result (actual rows=2736 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=2736 loops=1) + Order: metrics."time" + -> Sort (actual rows=720 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=720 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=2 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 3 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=1008 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 1512 + -> Sort (actual rows=1008 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1008 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=2 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 3 +(24 rows) + +-- test empty targetlist +:PREFIX +SELECT +FROM :TEST_TABLE; + QUERY PLAN +------------------------------------------------------------------------------------ + Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(6 rows) + +-- test empty resultset +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id < 0; + QUERY PLAN +--------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 5 + -> Seq Scan on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 2520 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 5 +(12 rows) + +-- test targetlist not referencing columns +:PREFIX +SELECT 1 +FROM :TEST_TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------ + Result (actual rows=6840 loops=1) + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(7 rows) + +-- test constraints not present in targetlist +:PREFIX +SELECT v1 +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY v1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Sort (actual rows=1368 loops=1) + Sort Key: _hyper_1_1_chunk.v1 + Sort Method: quicksort + -> Append 
(actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Seq Scan on _hyper_1_2_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 +(15 rows) + +-- test order not present in targetlist +:PREFIX +SELECT v2 +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY v1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Sort (actual rows=1368 loops=1) + Sort Key: _hyper_1_1_chunk.v1 + Sort Method: quicksort + -> Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Seq Scan on _hyper_1_2_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 +(15 rows) + +-- test column with all NULL +:PREFIX +SELECT v3 +FROM :TEST_TABLE +WHERE device_id = 1; + QUERY PLAN +----------------------------------------------------------------------------------- + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Seq Scan on _hyper_1_2_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 +(12 rows) + +-- +-- test qual pushdown +-- +-- v3 is not segment by or order by column so should not be pushed down +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE v3 > 10.0 +ORDER BY time, + device_id; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=0 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + Sort Key: metrics."time", metrics.device_id + Sort Method: quicksort + -> Custom Scan (ChunkAppend) on public.metrics (actual rows=0 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + Order: metrics."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Sort (actual rows=0 loops=1) + Output: 
_hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Vectorized Filter: (_hyper_1_1_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 1800 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk (actual rows=0 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 2520 + -> Sort (actual rows=0 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=0 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 2520 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 +(35 rows) + +-- device_id constraint should be pushed down +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1) + Order: metrics."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: 
top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = 1) +(17 rows) + +-- test IS NULL / IS NOT NULL +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id IS NOT NULL +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (device_id IS NOT NULL) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id IS NOT NULL) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id IS NOT NULL) +(20 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id IS NULL +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 5 + -> Seq Scan on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 2520 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 5 +(16 rows) + +-- test IN (Const,Const) +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=720 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=2 loops=1) + Filter: 
(device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 3 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = ANY ('{1,2}'::integer[])) +(21 rows) + +-- test cast pushdown +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = '1'::text::int +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1) + Order: metrics."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = 1) +(17 rows) + +--test var op var with two segment by +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = device_id_peer +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Order: metrics."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 5 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 2520 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 5 +(24 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id_peer < device_id +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom 
Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (device_id_peer < device_id) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id_peer < device_id) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id_peer < device_id) +(20 rows) + +-- test expressions +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 + 4 / 2 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1) + Order: metrics."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 3) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id = 3) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = 3) +(17 rows) + +-- test function calls +-- not yet pushed down +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = length(substring(version(), 1, 3)) +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1) + Order: metrics."time" + Chunks excluded during startup: 0 + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + Filter: (device_id = length("substring"(version(), 1, 3))) + Rows Removed by Filter: 1440 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id = length("substring"(version(), 1, 3))) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Filter: (device_id = length("substring"(version(), 1, 3))) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) +(18 rows) + +-- +-- test segment meta pushdown +-- +-- order by column and const +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time = '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=5 loops=1) + Vectorized Filter: ("time" = 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 1795 + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_5_15_chunk.device_id + Sort Method: 
quicksort + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: ((_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone)) +(9 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time < '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=15 loops=1) + Vectorized Filter: ("time" < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 1785 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_min_3 < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(9 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time <= '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=20 loops=1) + Vectorized Filter: ("time" <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 1780 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(9 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time >= '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1785 loops=1) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 15 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(23 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN 
+--------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1780 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 20 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(23 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE '2000-01-01 1:00:00+0' < time +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1780 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 20 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(23 rows) + +--pushdowns between order by and segment by columns +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v0 < 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: 
metrics."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=1) + Vectorized Filter: (v0 < 1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < 1) + Rows Removed by Filter: 5 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Filter: (v0 < 1) + Rows Removed by Filter: 44 + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Vectorized Filter: (v0 < 1) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (_ts_meta_min_1 < 1) +(24 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v0 < device_id +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < device_id) + Rows Removed by Filter: 5 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=11 loops=1) + Filter: (v0 < device_id) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Filter: (v0 < device_id) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (_ts_meta_min_1 < device_id) +(23 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id < v0 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > device_id) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id < v0) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (_ts_meta_max_1 > device_id) +(22 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v1 = device_id +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) 
+ -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Order: metrics."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=1) + Filter: (v1 = device_id) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_2 <= device_id) AND (_ts_meta_max_2 >= device_id)) + Rows Removed by Filter: 5 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: (v1 = device_id) + Rows Removed by Filter: 2520 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=1) + Filter: (v1 = device_id) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_2 <= device_id) AND (_ts_meta_max_2 >= device_id)) + Rows Removed by Filter: 5 +(26 rows) + +--pushdown between two order by column (not pushed down) +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v0 = v1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Order: metrics."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=1) + Filter: (v0 = v1) + Rows Removed by Filter: 1800 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: (v0 = v1) + Rows Removed by Filter: 2520 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=1) + Filter: (v0 = v1) + Rows Removed by Filter: 2520 + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(24 rows) + +--pushdown of quals on order by and segment by cols anded together +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-01 1:00:00+0' + AND device_id = 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + -> Custom Scan (ChunkAppend) on public.metrics 
(actual rows=10 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + Order: metrics."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Sort (actual rows=10 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Sort Key: _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=356 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Vectorized Filter: (_hyper_1_1_chunk."time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 4 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Filter: ((compress_hyper_5_15_chunk._ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (compress_hyper_5_15_chunk.device_id = 1)) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk (never executed) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Index Cond: (_hyper_1_2_chunk."time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Filter: (_hyper_1_2_chunk.device_id = 1) + -> Sort (never executed) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (never executed) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (never executed) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, 
compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: ((compress_hyper_5_16_chunk._ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (compress_hyper_5_16_chunk.device_id = 1)) +(34 rows) + +--pushdown of quals on order by and segment by cols or together (not pushed down) +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-01 1:00:00+0' + OR device_id = 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=15 loops=1) + Order: metrics."time" + -> Sort (actual rows=15 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1784 loops=1) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + Rows Removed by Filter: 16 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) +(21 rows) + +--functions not yet optimized +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time < now() +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" + Chunks excluded during startup: 0 + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" < now()) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) +(21 rows) + +-- test sort optimization interaction +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1) + Order: metrics."time" DESC + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on 
compress_hyper_5_16_chunk (actual rows=5 loops=1) + -> Index Only Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Heap Fetches: 0 + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Seq Scan on compress_hyper_5_15_chunk (never executed) +(14 rows) + +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +ORDER BY time DESC, + device_id +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics."time" DESC, metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=11 loops=1) + Order: metrics."time" DESC + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) + -> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Seq Scan on compress_hyper_5_15_chunk (never executed) +(17 rows) + +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +ORDER BY device_id, + time DESC +LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_1_chunk.device_id, _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(10 rows) + +-- +-- test ordered path +-- +-- should not produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY time, + device_id; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort (actual rows=4195 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 120 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on public.metrics (actual rows=4195 loops=1) + Output: metrics."time", metrics.device_id, metrics.device_id_peer, metrics.v0, metrics.v1, metrics.v2, metrics.v3 + Order: metrics."time" + Startup 
Exclusion: false + Runtime Exclusion: false + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Index Cond: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Sort (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(24 rows) + +-- should produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" + -> Sort (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, 
_hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_de on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(17 rows) + +-- test order by columns not in targetlist +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC, + time +LIMIT 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk."time" + -> Merge Append (actual rows=100 loops=1) + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" + -> Sort (actual rows=100 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk."time" + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk."time" + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1 loops=1) + Output: _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, 
_hyper_1_3_chunk."time" + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_de on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(19 rows) + +-- test ordering only by segmentby columns +-- should produce ordered path and not have sequence number in targetlist of compressed scan +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer +LIMIT 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer + -> Merge Append (actual rows=100 loops=1) + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer + -> Sort (actual rows=100 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer + Sort Method: top-N heapsort + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1 loops=1) + Output: _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_de on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, 
compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(19 rows) + +-- should produce ordered path +-- only referencing PREFIX_VERBOSE should work +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer, + v0 +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0 + -> Sort (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0 + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0 + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0 + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_de on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(17 rows) + +-- should produce ordered path +-- only referencing PREFIX_VERBOSE should work +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer, + v0, + v1 +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC; + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC + -> Sort (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_de on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(17 rows) + +-- should not produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC, + time, + v3; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4195 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, 
_hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Sort Key: _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time", _hyper_1_2_chunk.v3 + Sort Method: quicksort + -> Append (actual rows=4195 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(16 rows) + +-- should produce ordered path +-- ASC/DESC for segmentby columns can be pushed down +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id DESC, + device_id_peer DESC, + v0, + v1 DESC, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" + -> Sort (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, 
_hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Sort (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Sort Key: compress_hyper_5_16_chunk.device_id DESC, compress_hyper_5_16_chunk.device_id_peer DESC, compress_hyper_5_16_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(21 rows) + +-- should not produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id DESC, + device_id_peer DESC, + v0, + v1, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4195 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk."time" + Sort Method: quicksort + -> Append (actual rows=4195 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=1675 loops=1) + Output: _hyper_1_2_chunk."time", 
_hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 845 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(16 rows) + +-- +-- test constraint exclusion +-- +-- test plan time exclusion +-- first chunk should be excluded +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY time, + device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Incremental Sort (actual rows=4195 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 120 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=4195 loops=1) + Order: metrics."time" + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=1675 loops=1) + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Sort (actual rows=2520 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + Vectorized Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +-- test runtime exclusion +-- first chunk should be excluded +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz +ORDER BY time, + device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Incremental Sort (actual rows=4195 loops=1) + Sort Key: metrics."time", metrics.device_id + Presorted Key: metrics."time" + Full-sort Groups: 120 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics (actual rows=4195 loops=1) + Order: metrics."time" + Chunks excluded during startup: 1 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=1675 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Sort (actual rows=2520 loops=1) + Sort Key: 
_hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_3 > ('2000-01-08'::cstring)::timestamp with time zone) +(16 rows) + +-- test aggregate +:PREFIX +SELECT count(*) +FROM :TEST_TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(10 rows) + +-- test aggregate with GROUP BY +-- Disable hash aggregation to get a deterministic test output +SET enable_hashagg = OFF; +:PREFIX +SELECT count(*) +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Sort (actual rows=15 loops=1) + Sort Key: _hyper_1_1_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=15 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Sort (actual rows=1800 loops=1) + Sort Key: _hyper_1_1_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_2_chunk.device_id + -> Sort (actual rows=2520 loops=1) + Sort Key: _hyper_1_2_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_3_chunk.device_id + -> Sort (actual rows=2520 loops=1) + Sort Key: _hyper_1_3_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(26 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Sort (actual rows=15 loops=1) + Sort Key: _hyper_1_1_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=15 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Sort (actual rows=1800 loops=1) + Sort Key: _hyper_1_1_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Partial GroupAggregate (actual 
rows=5 loops=1) + Group Key: _hyper_1_2_chunk.device_id + -> Sort (actual rows=2520 loops=1) + Sort Key: _hyper_1_2_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_3_chunk.device_id + -> Sort (actual rows=2520 loops=1) + Sort Key: _hyper_1_3_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(27 rows) + +SET enable_hashagg = ON; +-- test CTE +:PREFIX WITH q AS ( + SELECT v1 + FROM :TEST_TABLE + ORDER BY time +) +SELECT * +FROM q +ORDER BY v1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Sort (actual rows=6840 loops=1) + Sort Key: q.v1 + Sort Method: quicksort + -> Subquery Scan on q (actual rows=6840 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=6840 loops=1) + Order: metrics."time" + -> Sort (actual rows=1800 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Sort (actual rows=2520 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(17 rows) + +-- test CTE join +:PREFIX WITH q1 AS ( + SELECT time, + v1 + FROM :TEST_TABLE + WHERE device_id = 1 + ORDER BY time +), +q2 AS ( + SELECT time, + v2 + FROM :TEST_TABLE + WHERE device_id = 2 + ORDER BY time +) +SELECT * +FROM q1 + INNER JOIN q2 ON q1.time = q2.time +ORDER BY q1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------ + Merge Join (actual rows=1368 loops=1) + Merge Cond: (metrics."time" = metrics_1."time") + -> Custom Scan (ChunkAppend) on metrics (actual rows=1368 loops=1) + Order: metrics."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 2016 + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Materialize (actual rows=1368 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1368 loops=1) + Order: metrics_1."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_1_1_chunk_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=1 loops=1) + Filter: (device_id = 
2) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=504 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2016 + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_1_3_chunk_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk _hyper_1_3_chunk_1 (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_1 (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 4 +(41 rows) + +-- test prepared statement +PREPARE prep AS +SELECT count(time) +FROM :TEST_TABLE +WHERE device_id = 1; +:PREFIX EXECUTE prep; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 2016 + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 +(16 rows) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +DEALLOCATE prep; +-- +-- test indexes +-- +SET enable_seqscan TO FALSE; +-- IndexScans should work +:PREFIX_VERBOSE +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=1368 loops=1) + Sort Key: _hyper_1_1_chunk."time" + -> Sort (actual rows=360 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id + Bulk Decompression: true + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, 
compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id = 1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id + Filter: (_hyper_1_2_chunk.device_id = 1) + Rows Removed by Filter: 2016 + -> Sort (actual rows=504 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id = 1) +(26 rows) + +-- globs should not plan IndexOnlyScans +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=1368 loops=1) + Sort Key: _hyper_1_1_chunk."time" + -> Sort (actual rows=360 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, 
compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id = 1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk.device_id = 1) + Rows Removed by Filter: 2016 + -> Sort (actual rows=504 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Sort Key: _hyper_1_3_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id = 1) +(26 rows) + +-- whole row reference should work +:PREFIX_VERBOSE +SELECT test_table +FROM :TEST_TABLE AS test_table +WHERE device_id = 1 +ORDER BY device_id, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=1368 loops=1) + Sort Key: test_table_1."time" + -> Sort (actual rows=360 loops=1) + Output: ((test_table_1.*)::metrics), test_table_1.device_id, test_table_1."time" + Sort Key: test_table_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk test_table_1 (actual rows=360 loops=1) + Output: test_table_1.*, test_table_1.device_id, test_table_1."time" + Bulk Decompression: true + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) 
+ Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id = 1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk test_table_2 (actual rows=504 loops=1) + Output: test_table_2.*, test_table_2.device_id, test_table_2."time" + Filter: (test_table_2.device_id = 1) + Rows Removed by Filter: 2016 + -> Sort (actual rows=504 loops=1) + Output: ((test_table_3.*)::metrics), test_table_3.device_id, test_table_3."time" + Sort Key: test_table_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk test_table_3 (actual rows=504 loops=1) + Output: test_table_3.*, test_table_3.device_id, test_table_3."time" + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id = 1) +(26 rows) + +-- even when we select only a segmentby column, we still need count +:PREFIX_VERBOSE +SELECT device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + Output: _hyper_1_1_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk._ts_meta_count + Index Cond: (compress_hyper_5_15_chunk.device_id = 1) + Heap Fetches: 1 + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) + Output: _hyper_1_2_chunk.device_id + Filter: (_hyper_1_2_chunk.device_id = 1) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + Output: _hyper_1_3_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk._ts_meta_count + Index Cond: 
(compress_hyper_5_16_chunk.device_id = 1) + Heap Fetches: 1 +(19 rows) + +:PREFIX_VERBOSE +SELECT count(*) +FROM :TEST_TABLE +WHERE device_id = 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + Output: count(*) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + Output: PARTIAL count(*) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + Bulk Decompression: false + -> Index Only Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk._ts_meta_count + Index Cond: (compress_hyper_5_15_chunk.device_id = 1) + Heap Fetches: 1 + -> Partial Aggregate (actual rows=1 loops=1) + Output: PARTIAL count(*) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) + Filter: (_hyper_1_2_chunk.device_id = 1) + Rows Removed by Filter: 2016 + -> Partial Aggregate (actual rows=1 loops=1) + Output: PARTIAL count(*) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + Bulk Decompression: false + -> Index Only Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk._ts_meta_count + Index Cond: (compress_hyper_5_16_chunk.device_id = 1) + Heap Fetches: 1 +(24 rows) + +-- should be able to order using an index +CREATE INDEX tmp_idx ON :TEST_TABLE (device_id); +:PREFIX_VERBOSE +SELECT device_id +FROM :TEST_TABLE +ORDER BY device_id; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=6840 loops=1) + Sort Key: _hyper_1_1_chunk.device_id + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=1800 loops=1) + Output: _hyper_1_1_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk._ts_meta_count + Heap Fetches: 5 + -> Index Only Scan using _hyper_1_2_chunk_tmp_idx on _timescaledb_internal._hyper_1_2_chunk (actual rows=2520 loops=1) + Output: _hyper_1_2_chunk.device_id + Heap Fetches: 2520 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=2520 loops=1) + Output: _hyper_1_3_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) + Output: compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk._ts_meta_count + Heap Fetches: 5 +(17 rows) + +DROP INDEX tmp_idx CASCADE; +--use the peer index +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE device_id_peer = 1 +ORDER BY device_id_peer, + time; + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Sort Key: _hyper_1_1_chunk."time" + Sort Method: quicksort + -> Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.v0, _hyper_1_1_chunk.v1, _hyper_1_1_chunk.v2, _hyper_1_1_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_5_15_chunk__compressed_hypertable_5_device_id_pe on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=0 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id_peer = 1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _timescaledb_internal._hyper_1_2_chunk (actual rows=0 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 + Filter: (_hyper_1_2_chunk.device_id_peer = 1) + Rows Removed by Filter: 2520 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=0 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_pe on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=0 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id_peer = 1) +(21 rows) + 
+:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id_peer = 1 +ORDER BY device_id_peer; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_5_15_chunk__compressed_hypertable_5_device_id_pe on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=0 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id_peer = 1) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=0 loops=1) + Output: _hyper_1_2_chunk.device_id_peer + Filter: (_hyper_1_2_chunk.device_id_peer = 1) + Rows Removed by Filter: 2520 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=0 loops=1) + Output: _hyper_1_3_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_pe on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=0 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id_peer = 1) +(17 rows) + +--ensure that we can get a nested loop +SET enable_seqscan TO TRUE; +SET enable_hashjoin TO FALSE; +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id_peer IN ( + VALUES (1)); + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=1) + Output: _hyper_1_1_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=0 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_15_chunk.device_id_peer = 1) + Rows Removed by Filter: 5 + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=0 loops=1) + Output: _hyper_1_2_chunk.device_id_peer + Filter: (_hyper_1_2_chunk.device_id_peer = 1) + Rows Removed by Filter: 2520 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=0 loops=1) + Output: _hyper_1_3_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=0 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk.device_id_peer = 1) + Rows Removed by Filter: 5 +(19 rows) + +--with multiple values can get a nested loop. 
+:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id_peer IN ( + VALUES (1), + (2)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=0 loops=1) + Output: _hyper_1_1_chunk.device_id_peer + -> Unique (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Sort (actual rows=2 loops=1) + Output: "*VALUES*".column1 + Sort Key: "*VALUES*".column1 + Sort Method: quicksort + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Append (actual rows=0 loops=2) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=0 loops=2) + Output: _hyper_1_1_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_5_15_chunk__compressed_hypertable_5_device_id_pe on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=0 loops=2) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id_peer = "*VALUES*".column1) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=0 loops=2) + Output: _hyper_1_2_chunk.device_id_peer + Filter: ("*VALUES*".column1 = _hyper_1_2_chunk.device_id_peer) + Rows Removed by Filter: 2520 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=0 loops=2) + Output: _hyper_1_3_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_5_16_chunk__compressed_hypertable_5_device_id_pe on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=0 loops=2) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id_peer = "*VALUES*".column1) +(27 rows) + +RESET enable_hashjoin; +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1)); + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + Output: _hyper_1_1_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_15_chunk.device_id = 1) + Rows Removed by Filter: 4 + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) + Output: _hyper_1_2_chunk.device_id_peer + Filter: (_hyper_1_2_chunk.device_id = 1) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + Output: _hyper_1_3_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk.device_id = 1) + Rows Removed by Filter: 4 +(19 rows) + +--with multiple values can get a semi-join or nested loop depending on seq_page_cost. 
+:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1), + (2)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=2736 loops=1) + Output: _hyper_1_1_chunk.device_id_peer + -> Unique (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Sort (actual rows=2 loops=1) + Output: "*VALUES*".column1 + Sort Key: "*VALUES*".column1 + Sort Method: quicksort + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Append (actual rows=1368 loops=2) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=2) + Output: _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=2) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id = "*VALUES*".column1) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=2) + Output: _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.device_id + Filter: ("*VALUES*".column1 = _hyper_1_2_chunk.device_id) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=2) + Output: _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=2) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id = "*VALUES*".column1) +(27 rows) + +SET seq_page_cost = 100; +-- loop/row counts of this query is different on windows so we run it without analyze +:PREFIX_NO_ANALYZE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + 
VALUES (1), + (2)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Hash Semi Join + Output: _hyper_1_1_chunk.device_id_peer + Hash Cond: (_hyper_1_1_chunk.device_id = "*VALUES*".column1) + -> Append + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.device_id + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.device_id + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.device_id + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + -> Hash + Output: "*VALUES*".column1 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 +(18 rows) + +RESET seq_page_cost; +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 
loops=1) + Output: _hyper_1_1_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_15_chunk.device_id = 1) + Rows Removed by Filter: 4 + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) + Output: _hyper_1_2_chunk.device_id_peer + Filter: (_hyper_1_2_chunk.device_id = 1) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + Output: _hyper_1_3_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Filter: (compress_hyper_5_16_chunk.device_id = 1) + Rows Removed by Filter: 4 +(19 rows) + +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1), + (2)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=2736 loops=1) + Output: _hyper_1_1_chunk.device_id_peer + -> Unique (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Sort (actual rows=2 loops=1) + Output: "*VALUES*".column1 + Sort Key: "*VALUES*".column1 + Sort Method: quicksort + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Append (actual rows=1368 loops=2) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=2) + Output: _hyper_1_1_chunk.device_id_peer, _hyper_1_1_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=2) + Output: compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, 
compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3, compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_15_chunk.device_id = "*VALUES*".column1) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=2) + Output: _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.device_id + Filter: ("*VALUES*".column1 = _hyper_1_2_chunk.device_id) + Rows Removed by Filter: 2016 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=2) + Output: _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=2) + Output: compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3, compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_5_16_chunk.device_id = "*VALUES*".column1) +(27 rows) + +-- test view +CREATE OR REPLACE VIEW compressed_view AS +SELECT time, + device_id, + v1, + v2 +FROM :TEST_TABLE; +:PREFIX +SELECT * +FROM compressed_view +WHERE device_id = 1 +ORDER BY time DESC +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1) + Order: metrics."time" DESC + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_1_3_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Seq Scan on compress_hyper_5_15_chunk (never executed) + Filter: (device_id = 1) +(17 rows) + +DROP VIEW compressed_view; +-- test INNER JOIN +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = m2.device_id + ORDER BY m1.time, + m1.device_id + LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: 
(m1.device_id = m2.device_id) + Rows Removed by Join Filter: 40 + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=11 loops=1) + Order: m1."time" + -> Sort (actual rows=11 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize (actual rows=51 loops=1) + -> Custom Scan (ChunkAppend) on metrics m2 (actual rows=11 loops=1) + Order: m2."time" + -> Sort (actual rows=11 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m2_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m2_2 (never executed) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_1 (never executed) +(34 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + INNER JOIN :TEST_TABLE m3 ON m2.time = m3.time + AND m1.device_id = m2.device_id + AND m3.device_id = 3 + ORDER BY m1.time, + m1.device_id + LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m3_1."time") + -> Merge Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = m2.device_id) + Rows Removed by Join Filter: 40 + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=11 loops=1) + Order: m1."time" + -> Sort (actual rows=11 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize (actual rows=51 loops=1) + -> Custom Scan (ChunkAppend) on metrics m2 (actual rows=11 loops=1) + Order: m2."time" + -> Sort (actual rows=11 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m2_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m2_2 (never executed) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk 
compress_hyper_5_16_chunk_1 (never executed) + -> Materialize (actual rows=11 loops=1) + -> Merge Append (actual rows=3 loops=1) + Sort Key: m3_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m3_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m3_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_2 (actual rows=1 loops=1) + Filter: (device_id = 3) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m3_2 (actual rows=1 loops=1) + Filter: (device_id = 3) + Rows Removed by Filter: 2 + -> Sort (actual rows=1 loops=1) + Sort Key: m3_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m3_3 (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_2 (actual rows=1 loops=1) + Filter: (device_id = 3) + Rows Removed by Filter: 4 +(56 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 + ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id + LIMIT 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=100 loops=1) + Order: m1."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics m2 (actual rows=100 loops=1) + Order: m2."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m2_2 (never executed) + Filter: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_1 (never executed) + Filter: (device_id = 2) +(36 rows) + +:PREFIX +SELECT * +FROM metrics m1 + INNER JOIN metrics_space m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 + ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id + LIMIT 100; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=100 
loops=1) + Order: m1."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=100 loops=1) + Order: m2."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk m2_2 (never executed) + Index Cond: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id = 2) +(36 rows) + +-- test OUTER JOIN +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = m2.device_id +ORDER BY m1.time, + m1.device_id +LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Left Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = m2.device_id) + Rows Removed by Join Filter: 40 + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=11 loops=1) + Order: m1."time" + -> Sort (actual rows=11 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize (actual rows=51 loops=1) + -> Custom Scan (ChunkAppend) on metrics m2 (actual rows=11 loops=1) + Order: m2."time" + -> Sort (actual rows=11 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m2_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m2_2 (never executed) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_1 (never executed) +(34 rows) + +:PREFIX +SELECT * +FROM 
:TEST_TABLE m1 + LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Incremental Sort (actual rows=100 loops=1) + Sort Key: m1."time", m1.device_id, m2."time", m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 3 Sort Method: quicksort + -> Merge Left Join (actual rows=101 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = 1) + Rows Removed by Join Filter: 81 + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=101 loops=1) + Order: m1."time" + -> Sort (actual rows=101 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize (actual rows=102 loops=1) + -> Custom Scan (ChunkAppend) on metrics m2 (actual rows=22 loops=1) + Order: m2."time" + -> Sort (actual rows=22 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m2_2 (never executed) + Filter: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_1 (never executed) + Filter: (device_id = 2) +(38 rows) + +:PREFIX +SELECT * +FROM metrics m1 + LEFT OUTER JOIN metrics_space m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 100; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Incremental Sort (actual rows=100 loops=1) + Sort Key: m1."time", m1.device_id, m2."time", m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 3 Sort Method: quicksort + -> Merge Left Join (actual rows=101 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = 1) + Rows Removed by Join Filter: 81 + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=101 loops=1) + Order: m1."time" + -> Sort (actual rows=101 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize 
(actual rows=102 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=22 loops=1) + Order: m2."time" + -> Merge Append (actual rows=22 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=0 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Sort (actual rows=22 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_2 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2 + -> Sort (actual rows=0 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + Filter: (device_id = 2) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) + Filter: (device_id = 2) +(82 rows) + +-- test implicit self-join +:PREFIX +SELECT * +FROM :TEST_TABLE m1, + :TEST_TABLE m2 +WHERE m1.time = m2.time +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 20; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=20 loops=1) + -> Incremental Sort (actual rows=20 loops=1) + Sort Key: m1."time", m1.device_id, m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Join (actual rows=26 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=6 loops=1) + Order: m1."time" + -> Sort (actual rows=6 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 
(never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize (actual rows=26 loops=1) + -> Custom Scan (ChunkAppend) on metrics m2 (actual rows=6 loops=1) + Order: m2."time" + -> Sort (actual rows=6 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m2_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m2_2 (never executed) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_1 (never executed) +(32 rows) + +-- test self-join with sub-query +:PREFIX +SELECT * +FROM ( + SELECT * + FROM :TEST_TABLE m1) m1 + INNER JOIN ( + SELECT * + FROM :TEST_TABLE m2) m2 ON m1.time = m2.time +ORDER BY m1.time, + m1.device_id, + m2.device_id +LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id, m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: top-N heapsort + -> Merge Join (actual rows=26 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=6 loops=1) + Order: m1."time" + -> Sort (actual rows=6 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize (actual rows=26 loops=1) + -> Custom Scan (ChunkAppend) on metrics m2 (actual rows=6 loops=1) + Order: m2."time" + -> Sort (actual rows=6 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m2_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk compress_hyper_5_15_chunk_1 (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m2_2 (never executed) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk compress_hyper_5_16_chunk_1 (never executed) +(32 rows) + +:PREFIX +SELECT * +FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time) + INNER JOIN LATERAL ( + SELECT time + FROM :TEST_TABLE m1 + WHERE m1.time = g.time + LIMIT 1) m1 ON TRUE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=1 loops=32) + Chunks excluded during 
runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1 loops=5) + Filter: ("time" = g."time") + Rows Removed by Filter: 168 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=5) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time")) + -> Index Only Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (actual rows=1 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 7 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (actual rows=1 loops=7) + Filter: ("time" = g."time") + Rows Removed by Filter: 240 + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=7) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time")) +(18 rows) + +-- test prepared statement with params pushdown +PREPARE param_prep (int) AS +SELECT * +FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time) + INNER JOIN LATERAL ( + SELECT time + FROM :TEST_TABLE m1 + WHERE m1.time = g.time + AND device_id = $1 + LIMIT 1) m1 ON TRUE; +:PREFIX EXECUTE param_prep (1); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=1 loops=32) + Chunks excluded during runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1 loops=5) + Filter: ("time" = g."time") + Rows Removed by Filter: 168 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=5) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 1)) + -> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (actual rows=1 loops=7) + Index Cond: ("time" = g."time") + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (actual rows=1 loops=7) + Filter: ("time" = g."time") + Rows Removed by Filter: 240 + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=7) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 1)) +(18 rows) + +:PREFIX EXECUTE param_prep (2); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=1 loops=32) + Chunks excluded during runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1 loops=5) + Filter: ("time" = g."time") + Rows Removed by Filter: 168 + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=5) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 2)) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (actual rows=1 loops=7) + Index Cond: ("time" = g."time") + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (actual rows=1 loops=7) + Filter: ("time" = g."time") + Rows Removed by Filter: 240 + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=1 loops=7) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 2)) + Rows Removed by Filter: 1 +(21 
rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (2); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 
2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (2); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +DEALLOCATE param_prep; +-- test continuous aggs +SET client_min_messages TO error; +CREATE MATERIALIZED VIEW cagg_test WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) AS +SELECT time_bucket ('1d', time) AS time, + device_id, + avg(v1) +FROM :TEST_TABLE +WHERE device_id = 1 +GROUP BY 1, + 2 WITH DATA; +SELECT time +FROM cagg_test +ORDER BY time +LIMIT 1; + time +------------------------------ + Fri Dec 31 16:00:00 1999 PST +(1 row) + +DROP MATERIALIZED VIEW cagg_test; +RESET client_min_messages; +--github issue 1558. 
nested loop with index scan needed +--disables parallel scan +SET enable_seqscan = FALSE; +SET enable_bitmapscan = FALSE; +SET max_parallel_workers_per_gather = 0; +SET enable_hashjoin = FALSE; +SET enable_mergejoin = FALSE; +:PREFIX +SELECT * +FROM metrics, + metrics_space +WHERE metrics.time > metrics_space.time + AND metrics.device_id = metrics_space.device_id + AND metrics.time < metrics_space.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop (actual rows=0 loops=1) + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Index Scan using compress_hyper_6_18_chunk_c_space_index_2 on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Index Scan using compress_hyper_6_19_chunk_c_space_index_2 on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=504 loops=1) + -> Append (actual rows=0 loops=6840) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=6840) + Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time")) + Rows Removed by Filter: 360 + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on compress_hyper_5_15_chunk (actual rows=1 loops=6840) + Index Cond: (device_id = _hyper_2_4_chunk.device_id) + -> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=6840) + Index Cond: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time")) + Filter: (_hyper_2_4_chunk.device_id = device_id) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=6840) + Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time")) + Rows Removed by Filter: 504 + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk (actual rows=1 loops=6840) + Index Cond: (device_id = _hyper_2_4_chunk.device_id) +(30 rows) + +SET enable_seqscan = TRUE; +SET enable_bitmapscan = TRUE; +SET max_parallel_workers_per_gather = 0; +SET enable_hashjoin = TRUE; +SET enable_mergejoin = TRUE; +---end github issue 1558 +\set TEST_TABLE 'metrics_space' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- this should use DecompressChunk node +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time +LIMIT 5; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + Output: metrics_space."time", metrics_space.device_id, metrics_space.device_id_peer, metrics_space.v0, metrics_space.v1, metrics_space.v2, metrics_space.v3 + -> Custom Scan (ChunkAppend) on public.metrics_space (actual rows=5 loops=1) + Output: metrics_space."time", metrics_space.device_id, metrics_space.device_id_peer, metrics_space.v0, metrics_space.v1, metrics_space.v2, metrics_space.v3 + Order: metrics_space."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Sort (actual rows=5 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Sort Key: _hyper_2_4_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_17_chunk.device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_7_chunk (never executed) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk.device_id = 1) + -> Sort (never executed) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (never executed) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, 
_hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (never executed) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk.device_id = 1) +(29 rows) + +-- test RECORD by itself +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_space (actual rows=1368 loops=1) + Order: metrics_space."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) +(16 rows) + +-- test expressions +:PREFIX +SELECT time_bucket ('1d', time), + v1 + v2 AS "sum", + COALESCE(NULL, v1, v2) AS "coalesce", + NULL AS "NULL", + 'text' AS "text", + :TEST_TABLE AS "RECORD" +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time, + device_id; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort (actual rows=2736 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 86 Sort Method: quicksort + -> Result (actual rows=2736 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=2736 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=720 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 2 + -> Merge Append (actual rows=1008 loops=1) + Sort Key: _hyper_2_7_chunk."time" 
+ -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=504 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=504 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 1008 + -> Merge Append (actual rows=1008 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=1 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 2 +(56 rows) + +-- test empty targetlist +:PREFIX +SELECT +FROM :TEST_TABLE; + QUERY PLAN +------------------------------------------------------------------------------------- + Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) +(15 rows) + +-- test empty resultset +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id < 0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 3 + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=0 loops=1) + Index Cond: (device_id < 0) + -> Index Scan using 
_hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=0 loops=1) + Index Cond: (device_id < 0) + -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=0 loops=1) + Index Cond: (device_id < 0) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 3 + -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=0 loops=1) + Index Cond: (device_id < 0) +(29 rows) + +-- test targetlist not referencing columns +:PREFIX +SELECT 1 +FROM :TEST_TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------- + Result (actual rows=6840 loops=1) + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) +(16 rows) + +-- test constraints not present in targetlist +:PREFIX +SELECT v1 +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY v1; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort (actual rows=1368 loops=1) + Sort Key: _hyper_2_4_chunk.v1 + Sort Method: quicksort + -> Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) +(12 rows) + +-- test order not present in targetlist +:PREFIX +SELECT v2 +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY v1; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort (actual rows=1368 loops=1) + Sort Key: _hyper_2_4_chunk.v1 + Sort Method: quicksort + -> Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + -> 
Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) +(12 rows) + +-- test column with all NULL +:PREFIX +SELECT v3 +FROM :TEST_TABLE +WHERE device_id = 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) +(9 rows) + +-- +-- test qual pushdown +-- +-- v3 is not segment by or order by column so should not be pushed down +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE v3 > 10.0 +ORDER BY time, + device_id; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=0 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Sort Key: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=0 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Vectorized Filter: (_hyper_2_4_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 360 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=0 loops=1) + Output: _hyper_2_5_chunk."time", _hyper_2_5_chunk.device_id, _hyper_2_5_chunk.device_id_peer, _hyper_2_5_chunk.v0, _hyper_2_5_chunk.v1, _hyper_2_5_chunk.v2, _hyper_2_5_chunk.v3 + Vectorized Filter: (_hyper_2_5_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 1080 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_18_chunk 
(actual rows=3 loops=1) + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=0 loops=1) + Output: _hyper_2_6_chunk."time", _hyper_2_6_chunk.device_id, _hyper_2_6_chunk.device_id_peer, _hyper_2_6_chunk.v0, _hyper_2_6_chunk.v1, _hyper_2_6_chunk.v2, _hyper_2_6_chunk.v3 + Vectorized Filter: (_hyper_2_6_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 360 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=0 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 504 + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=0 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Filter: (_hyper_2_8_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 1512 + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=0 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Filter: (_hyper_2_9_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 504 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=0 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 504 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, 
compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=0 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 1512 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=0 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Filter: (_hyper_2_12_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 504 +(56 rows) + +-- device_id constraint should be pushed down +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1) + Order: metrics_space."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id = 1) +(16 rows) + +-- test IS NULL / IS NOT NULL +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id IS NOT NULL +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: 
_hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id IS NOT NULL) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (device_id IS NOT NULL) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (device_id IS NOT NULL) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (device_id IS NOT NULL) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Filter: (device_id IS NOT NULL) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Filter: (device_id IS NOT NULL) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id IS NOT NULL) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id IS NOT NULL) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Filter: (device_id IS NOT NULL) +(62 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id IS NULL +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual 
rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 3 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 1 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 504 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 1512 + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 504 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 3 + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=0 loops=1) + Filter: (device_id IS NULL) + Rows Removed by Filter: 504 +(75 rows) + +-- test IN (Const,Const) +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=6 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=6 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: 
_hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 2 + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id = ANY ('{1,2}'::integer[])) +(50 rows) + +-- test cast pushdown +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = '1'::text::int +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1) + Order: metrics_space."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id = 1) +(16 rows) + +--test var op var with two segment by +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = device_id_peer +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort 
Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 3 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 1 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 504 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 1512 + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 504 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 3 + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=0 loops=1) + Filter: (device_id = device_id_peer) + Rows Removed by Filter: 504 +(75 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id_peer < device_id +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id_peer < device_id) + -> Sort (actual rows=7 
loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (device_id_peer < device_id) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (device_id_peer < device_id) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (device_id_peer < device_id) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Filter: (device_id_peer < device_id) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Filter: (device_id_peer < device_id) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id_peer < device_id) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id_peer < device_id) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Filter: (device_id_peer < device_id) +(62 rows) + +-- test expressions +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 + 4 / 2 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1) + Order: metrics_space."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (device_id = 3) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Filter: (device_id = 3) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Filter: (device_id = 3) +(13 rows) + +-- test function calls +-- not yet pushed down +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id = length(substring(version(), 1, 3)) +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=10 loops=1) + Sort Key: 
_hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + Filter: (device_id = length("substring"(version(), 1, 3))) + Rows Removed by Filter: 360 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + Filter: (device_id = length("substring"(version(), 1, 3))) + Rows Removed by Filter: 1080 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: top-N heapsort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + Filter: (device_id = length("substring"(version(), 1, 3))) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_time_idx on _hyper_2_7_chunk (never executed) + Index Cond: (device_id = length("substring"(version(), 1, 3))) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk (never executed) + Index Cond: (device_id = length("substring"(version(), 1, 3))) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_time_idx on _hyper_2_9_chunk (never executed) + Index Cond: (device_id = length("substring"(version(), 1, 3))) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + Filter: (device_id = length("substring"(version(), 1, 3))) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + Filter: (device_id = length("substring"(version(), 1, 3))) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_time_idx on _hyper_2_12_chunk (never executed) + Index Cond: (device_id = length("substring"(version(), 1, 3))) +(60 rows) + +-- +-- test segment meta pushdown +-- +-- order by column and const +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time = '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_2_4_chunk.device_id + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1 loops=1) + Vectorized Filter: ("time" = 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 359 + -> Sort (actual rows=1 
loops=1) + Sort Key: compress_hyper_6_17_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: ((_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone)) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=3 loops=1) + Vectorized Filter: ("time" = 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 1077 + -> Sort (actual rows=3 loops=1) + Sort Key: compress_hyper_6_18_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: ((_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone)) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1 loops=1) + Vectorized Filter: ("time" = 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 359 + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_6_19_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: ((_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone)) +(27 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time < '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=15 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=3 loops=1) + Vectorized Filter: ("time" < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 357 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_min_3 < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=9 loops=1) + Vectorized Filter: ("time" < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 1071 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_min_3 < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=3 loops=1) + Vectorized Filter: ("time" < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 357 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_min_3 < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(20 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time <= '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=4 loops=1) + Vectorized Filter: ("time" <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 356 + 
-> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=12 loops=1) + Vectorized Filter: ("time" <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 1068 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=4 loops=1) + Vectorized Filter: ("time" <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 356 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_min_3 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(20 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time >= '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=357 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=357 loops=1) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 3 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1071 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1071 loops=1) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 9 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=357 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=357 loops=1) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 3 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Index Cond: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Index 
Cond: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Index Cond: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (_ts_meta_max_3 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Index Cond: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(70 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-01 1:00:00+0' +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=356 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=356 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 4 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1068 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1068 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 12 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=356 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=356 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by 
Filter: 4 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(70 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE '2000-01-01 1:00:00+0' < time +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=356 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=356 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 4 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1068 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1068 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 12 + -> 
Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=356 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=356 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 4 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (_ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Index Cond: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(70 rows) + +--pushdowns between order by and segment by columns +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v0 < 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + Vectorized Filter: (v0 < 1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: 
(_ts_meta_min_1 < 1) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + Vectorized Filter: (v0 < 1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < 1) + Rows Removed by Filter: 3 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + Vectorized Filter: (v0 < 1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < 1) + Rows Removed by Filter: 1 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=0 loops=1) + Filter: (v0 < 1) + Rows Removed by Filter: 504 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=0 loops=1) + Filter: (v0 < 1) + Rows Removed by Filter: 1512 + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=0 loops=1) + Filter: (v0 < 1) + Rows Removed by Filter: 504 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=0 loops=1) + Vectorized Filter: (v0 < 1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < 1) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=0 loops=1) + Vectorized Filter: (v0 < 1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < 1) + Rows Removed by Filter: 3 + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=0 loops=1) + Filter: (v0 < 1) + Rows Removed by Filter: 504 +(80 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v0 < device_id +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + -> Seq Scan on 
compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < device_id) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < device_id) + Rows Removed by Filter: 3 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < device_id) + Rows Removed by Filter: 1 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + Rows Removed by Filter: 504 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + Rows Removed by Filter: 1512 + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + Rows Removed by Filter: 504 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < device_id) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < device_id) + Rows Removed by Filter: 3 + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + Rows Removed by Filter: 504 +(80 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE device_id < v0 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom 
Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_1 > device_id) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_max_1 > device_id) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_1 > device_id) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (device_id < v0) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Filter: (device_id < v0) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Filter: (device_id < v0) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (_ts_meta_max_1 > device_id) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (_ts_meta_max_1 > device_id) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Filter: (device_id < v0) +(67 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v1 = device_id +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 360 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 
loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 1080 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 360 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 504 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 1512 + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 504 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 504 + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 1512 + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=0 loops=1) + Filter: (v1 = (device_id)::double precision) + Rows Removed by Filter: 504 +(75 rows) + +--pushdown between two order by column (not pushed down) +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE v0 = v1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Incremental Sort (actual rows=0 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=0 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 360 + -> Seq Scan on 
compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 1080 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 360 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 504 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 1512 + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 504 + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 504 + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 1512 + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=0 loops=1) + Filter: ((v0)::double precision = v1) + Rows Removed by Filter: 504 +(75 rows) + +--pushdown of quals on order by and segment by cols anded together +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-01 1:00:00+0' + AND device_id = 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + Output: metrics_space."time", metrics_space.device_id, metrics_space.device_id_peer, 
metrics_space.v0, metrics_space.v1, metrics_space.v2, metrics_space.v3 + -> Custom Scan (ChunkAppend) on public.metrics_space (actual rows=10 loops=1) + Output: metrics_space."time", metrics_space.device_id, metrics_space.device_id_peer, metrics_space.v0, metrics_space.v1, metrics_space.v2, metrics_space.v3 + Order: metrics_space."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Sort (actual rows=10 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Sort Key: _hyper_2_4_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=356 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Vectorized Filter: (_hyper_2_4_chunk."time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 4 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Filter: ((compress_hyper_6_17_chunk._ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (compress_hyper_6_17_chunk.device_id = 1)) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_7_chunk (never executed) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Index Cond: (_hyper_2_7_chunk."time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Filter: (_hyper_2_7_chunk.device_id = 1) + -> Sort (never executed) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (never executed) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (never executed) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, 
compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: ((compress_hyper_6_20_chunk._ts_meta_max_3 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (compress_hyper_6_20_chunk.device_id = 1)) +(33 rows) + +--pushdown of quals on order by and segment by cols or together (not pushed down) +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-01 1:00:00+0' + OR device_id = 1 +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=15 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1068 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1068 loops=1) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + Rows Removed by Filter: 12 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=356 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=356 loops=1) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + Rows Removed by Filter: 4 + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp 
with time zone) OR (device_id = 1)) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) +(64 rows) + +--functions not yet optimized +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time < now() +ORDER BY time, + device_id +LIMIT 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Index Cond: ("time" < now()) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Index Cond: ("time" < now()) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Index Cond: ("time" < now()) + -> Merge Append (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_10_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Sort (never executed) + Sort Key: _hyper_2_11_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) + Filter: ("time" < now()) + -> Seq Scan on 
compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (never executed) + Index Cond: ("time" < now()) +(62 rows) + +-- test sort optimization interaction +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_2_12_chunk."time" DESC + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=3 loops=1) + Heap Fetches: 3 + -> Sort (actual rows=6 loops=1) + Sort Key: _hyper_2_11_chunk."time" DESC + Sort Method: top-N heapsort + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_10_chunk."time" DESC + Sort Method: top-N heapsort + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_2_9_chunk."time" DESC + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_2_6_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_6_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_6_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed) + -> Seq Scan on compress_hyper_6_19_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_2_5_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_5_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) + -> Seq Scan on compress_hyper_6_18_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_2_4_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_4_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) + -> Seq Scan on compress_hyper_6_17_chunk (never executed) +(51 rows) + +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +ORDER BY time DESC, + device_id +LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: metrics_space."time" DESC, metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=11 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=11 loops=1) + Sort Key: _hyper_2_12_chunk."time" DESC + -> Index Scan using 
_hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=3 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: _hyper_2_11_chunk."time" DESC + Sort Method: quicksort + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_2_10_chunk."time" DESC + Sort Method: quicksort + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" DESC + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_2_9_chunk."time" DESC + -> Index Scan using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed) + -> Index Scan using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed) + -> Index Scan using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + -> Merge Append (never executed) + Sort Key: _hyper_2_6_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_6_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_6_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed) + -> Seq Scan on compress_hyper_6_19_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_2_5_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_5_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) + -> Seq Scan on compress_hyper_6_18_chunk (never executed) + -> Sort (never executed) + Sort Key: _hyper_2_4_chunk."time" DESC + -> Sort (never executed) + Sort Key: _hyper_2_4_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) + -> Seq Scan on compress_hyper_6_17_chunk (never executed) +(51 rows) + +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +ORDER BY device_id, + time DESC +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_2_5_chunk.device_id, _hyper_2_5_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_2_6_chunk.device_id, _hyper_2_6_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_time_idx on _hyper_2_7_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) + 
Heap Fetches: 1 + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_time_idx on _hyper_2_9_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_time_idx on _hyper_2_12_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(36 rows) + +-- +-- test ordered path +-- +-- should not produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY time, + device_id; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort (actual rows=4195 loops=1) + Output: metrics_space."time", metrics_space.device_id, metrics_space.device_id_peer, metrics_space.v0, metrics_space.v1, metrics_space.v2, metrics_space.v3 + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 120 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on public.metrics_space (actual rows=4195 loops=1) + Output: metrics_space."time", metrics_space.device_id, metrics_space.device_id_peer, metrics_space.v0, metrics_space.v1, metrics_space.v2, metrics_space.v3 + Order: metrics_space."time" + Startup Exclusion: false + Runtime Exclusion: false + -> Merge Append (actual rows=1675 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_7_chunk (actual rows=335 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_8_chunk (actual rows=1005 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_9_chunk (actual rows=335 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, 
_hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Merge Append (actual rows=2520 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Sort (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, 
compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(56 rows) + +-- should produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" + -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_7_chunk (actual rows=335 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_8_chunk (actual rows=1005 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_9_chunk (actual rows=335 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_6_20_chunk__compressed_hypertable_6_device_id_de on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: 
compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_6_21_chunk__compressed_hypertable_6_device_id_de on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v1_ on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(28 rows) + +-- test order by columns not in targetlist +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC, + time +LIMIT 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + Output: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk."time" + -> Merge Append (actual rows=100 loops=1) + Sort Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, 
_hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_7_chunk (actual rows=100 loops=1) + Output: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk."time" + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 100 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_8_chunk (actual rows=1 loops=1) + Output: _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk."time" + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_9_chunk (actual rows=1 loops=1) + Output: _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk."time" + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=1 loops=1) + Output: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk."time" + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Sort (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Sort Key: compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1 loops=1) + Output: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk."time" + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat 
Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Sort (actual rows=1 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Sort Key: compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v1_ on _timescaledb_internal._hyper_2_12_chunk (actual rows=1 loops=1) + Output: _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk."time" + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 +(42 rows) + +-- test ordering only by segmentby columns +-- should produce ordered path and not have sequence number in targetlist of compressed scan +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer +LIMIT 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + Output: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer + -> Merge Append (actual rows=100 loops=1) + Sort Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=100 loops=1) + Output: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap 
Fetches: 100 + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=1 loops=1) + Output: _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=1 loops=1) + Output: _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=1 loops=1) + Output: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Sort (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Sort Key: compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1 loops=1) + Output: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Sort (actual rows=1 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Sort Key: compress_hyper_6_21_chunk.device_id, 
compress_hyper_6_21_chunk.device_id_peer + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=1 loops=1) + Output: _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 +(42 rows) + +-- should produce ordered path +-- only referencing PREFIX_VERBOSE should work +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer, + v0 +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0 + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_7_chunk (actual rows=335 loops=1) + Output: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0 + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 335 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_8_chunk (actual rows=1005 loops=1) + Output: _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0 + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1005 + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_9_chunk (actual rows=335 loops=1) + Output: _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0 + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 335 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0 + Vectorized Filter: (_hyper_2_10_chunk."time" > 
'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_6_20_chunk__compressed_hypertable_6_device_id_de on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_6_21_chunk__compressed_hypertable_6_device_id_de on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v1_ on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0 + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 504 +(32 rows) + +-- should produce ordered path +-- only referencing PREFIX_VERBOSE should work +:PREFIX_VERBOSE +SELECT device_id, + device_id_peer, + v0, + v1 +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC + -> Index 
Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_7_chunk (actual rows=335 loops=1) + Output: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 335 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_8_chunk (actual rows=1005 loops=1) + Output: _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1 + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1005 + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_9_chunk (actual rows=335 loops=1) + Output: _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1 + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 335 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_6_20_chunk__compressed_hypertable_6_device_id_de on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Index Scan using compress_hyper_6_21_chunk__compressed_hypertable_6_device_id_de on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 
PST'::timestamp with time zone) + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v1_ on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1 + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 504 +(32 rows) + +-- should not produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id, + device_id_peer, + v0, + v1 DESC, + time, + v3; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort (actual rows=4195 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Sort Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time", _hyper_2_7_chunk.v3 + Presorted Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk."time" + Full-sort Groups: 132 Sort Method: quicksort + -> Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" + -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_7_chunk (actual rows=335 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_8_chunk (actual rows=1005 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_t on _timescaledb_internal._hyper_2_9_chunk (actual rows=335 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Sort (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Sort Key: 
_hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1 DESC, _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Sort (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Sort Key: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1 DESC, _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v1_ on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(41 rows) + +-- should produce ordered path +-- ASC/DESC for segmentby columns can be pushed down +:PREFIX_VERBOSE +SELECT * +FROM 
:TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id DESC, + device_id_peer DESC, + v0, + v1 DESC, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=4195 loops=1) + Sort Key: _hyper_2_7_chunk.device_id DESC, _hyper_2_7_chunk.device_id_peer DESC, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" + -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=335 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=1005 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=335 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Sort (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Sort Key: compress_hyper_6_20_chunk.device_id DESC, compress_hyper_6_20_chunk.device_id_peer DESC, compress_hyper_6_20_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: 
compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Sort (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Sort Key: compress_hyper_6_21_chunk.device_id DESC, compress_hyper_6_21_chunk.device_id_peer DESC, compress_hyper_6_21_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(36 rows) + +-- should not produce ordered path +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY device_id DESC, + device_id_peer DESC, + v0, + v1, + time; + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=4195 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Sort Key: _hyper_2_7_chunk.device_id DESC, _hyper_2_7_chunk.device_id_peer DESC, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk."time" + Sort Method: quicksort + -> Append (actual rows=4195 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=335 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 169 + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1005 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Filter: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 507 + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=335 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Filter: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 169 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk."time", 
_hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Filter: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(34 rows) + +-- +-- test constraint exclusion +-- +-- test plan time exclusion +-- first chunk should be excluded +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08' +ORDER BY time, + device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort (actual rows=4195 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 120 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=4195 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1675 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=335 loops=1) + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=1005 loops=1) + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=335 loops=1) + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Merge Append (actual rows=2520 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + Vectorized Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: 
quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + Vectorized Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=504 loops=1) + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(38 rows) + +-- test runtime exclusion +-- first chunk should be excluded +:PREFIX +SELECT * +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz +ORDER BY time, + device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort (actual rows=4195 loops=1) + Sort Key: metrics_space."time", metrics_space.device_id + Presorted Key: metrics_space."time" + Full-sort Groups: 120 Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=4195 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_max_3 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_max_3 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 3 + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_max_3 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1 + -> Merge Append (actual rows=1675 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=335 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=1005 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=335 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Merge Append (actual rows=2520 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=504 
loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_3 > ('2000-01-08'::cstring)::timestamp with time zone) + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_max_3 > ('2000-01-08'::cstring)::timestamp with time zone) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=504 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) +(73 rows) + +-- test aggregate +:PREFIX +SELECT count(*) +FROM :TEST_TABLE; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) +(25 rows) + +-- test aggregate with GROUP BY +-- Disable hash aggregation to get a deterministic test output +SET enable_hashagg = OFF; +:PREFIX +SELECT count(*) +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Sort (actual rows=15 loops=1) + Sort Key: _hyper_2_4_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=15 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Sort (actual rows=360 loops=1) + Sort Key: 
_hyper_2_4_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_5_chunk.device_id + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_6_chunk.device_id + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_7_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_7_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_8_chunk.device_id + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_8_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_9_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_9_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_10_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_11_chunk.device_id + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_12_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_12_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) +(65 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Sort (actual rows=15 loops=1) + Sort Key: _hyper_2_4_chunk.device_id + Sort Method: quicksort + -> Append (actual rows=15 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: 
_hyper_2_5_chunk.device_id + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_6_chunk.device_id + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_7_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_7_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_8_chunk.device_id + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_8_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_9_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_9_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_10_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_11_chunk.device_id + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_12_chunk.device_id + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_12_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) +(66 rows) + +SET enable_hashagg = ON; +-- test CTE +:PREFIX WITH q AS ( + SELECT v1 + FROM :TEST_TABLE + ORDER BY time +) +SELECT * +FROM q +ORDER BY v1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=6840 loops=1) + Sort Key: q.v1 + Sort Method: quicksort + -> Subquery Scan on q (actual rows=6840 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=6840 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1800 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 
loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (actual rows=2520 loops=1) + Sort Key: _hyper_2_7_chunk."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Merge Append (actual rows=2520 loops=1) + Sort Key: _hyper_2_10_chunk."time" + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Sort (actual rows=1512 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=504 loops=1) +(56 rows) + +-- test CTE join +:PREFIX WITH q1 AS ( + SELECT time, + v1 + FROM :TEST_TABLE + WHERE device_id = 1 + ORDER BY time +), +q2 AS ( + SELECT time, + v2 + FROM :TEST_TABLE + WHERE device_id = 2 + ORDER BY time +) +SELECT * +FROM q1 + INNER JOIN q2 ON q1.time = q2.time +ORDER BY q1.time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join (actual rows=1368 loops=1) + Merge Cond: (metrics_space."time" = metrics_space_1."time") + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1368 loops=1) + Order: metrics_space."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Materialize (actual rows=1368 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space metrics_space_1 (actual rows=1368 loops=1) + Order: metrics_space_1."time" + -> Sort (actual rows=360 loops=1) + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by 
Filter: 2 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk (actual rows=504 loops=1) + Index Cond: (device_id = 2) + -> Sort (actual rows=504 loops=1) + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2 +(37 rows) + +-- test prepared statement +PREPARE prep AS +SELECT count(time) +FROM :TEST_TABLE +WHERE device_id = 1; +:PREFIX EXECUTE prep; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) + Filter: (device_id = 1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) +(13 rows) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +EXECUTE prep; + count +------- + 1368 +(1 row) + +DEALLOCATE prep; +-- +-- test indexes +-- +SET enable_seqscan TO FALSE; +-- IndexScans should work +:PREFIX_VERBOSE +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=1368 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=360 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id + Bulk Decompression: true + -> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, 
compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_17_chunk.device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id + Filter: (_hyper_2_7_chunk.device_id = 1) + -> Sort (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id + Bulk Decompression: true + -> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_20_chunk.device_id = 1) +(25 rows) + +-- globs should not plan IndexOnlyScans +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=1368 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=360 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, 
compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_17_chunk.device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk.device_id = 1) + -> Sort (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_20_chunk.device_id = 1) +(25 rows) + +-- whole row reference should work +:PREFIX_VERBOSE +SELECT test_table +FROM :TEST_TABLE AS test_table +WHERE device_id = 1 +ORDER BY device_id, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=1368 loops=1) + Sort Key: test_table_1."time" + -> Sort (actual rows=360 loops=1) + Output: ((test_table_1.*)::metrics_space), test_table_1.device_id, test_table_1."time" + Sort Key: test_table_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk test_table_1 (actual rows=360 loops=1) + Output: test_table_1.*, test_table_1.device_id, test_table_1."time" + Bulk Decompression: true + -> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, 
compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_17_chunk.device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_7_chunk test_table_2 (actual rows=504 loops=1) + Output: test_table_2.*, test_table_2.device_id, test_table_2."time" + Filter: (test_table_2.device_id = 1) + -> Sort (actual rows=504 loops=1) + Output: ((test_table_3.*)::metrics_space), test_table_3.device_id, test_table_3."time" + Sort Key: test_table_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk test_table_3 (actual rows=504 loops=1) + Output: test_table_3.*, test_table_3.device_id, test_table_3."time" + Bulk Decompression: true + -> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_20_chunk.device_id = 1) +(25 rows) + +-- even when we select only a segmentby column, we still need count +:PREFIX_VERBOSE +SELECT device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_17_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk._ts_meta_count + Index Cond: (compress_hyper_6_17_chunk.device_id = 1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk.device_id + Index Cond: (_hyper_2_7_chunk.device_id = 1) + Heap Fetches: 504 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_20_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk._ts_meta_count + Index Cond: (compress_hyper_6_20_chunk.device_id = 1) + Heap Fetches: 1 +(19 rows) + +:PREFIX_VERBOSE +SELECT count(*) +FROM 
:TEST_TABLE +WHERE device_id = 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + Output: count(*) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + Output: PARTIAL count(*) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_17_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk._ts_meta_count + Index Cond: (compress_hyper_6_17_chunk.device_id = 1) + Heap Fetches: 1 + -> Partial Aggregate (actual rows=1 loops=1) + Output: PARTIAL count(*) + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Index Cond: (_hyper_2_7_chunk.device_id = 1) + Heap Fetches: 504 + -> Partial Aggregate (actual rows=1 loops=1) + Output: PARTIAL count(*) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_20_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk._ts_meta_count + Index Cond: (compress_hyper_6_20_chunk.device_id = 1) + Heap Fetches: 1 +(24 rows) + +-- should be able to order using an index +CREATE INDEX tmp_idx ON :TEST_TABLE (device_id); +:PREFIX_VERBOSE +SELECT device_id +FROM :TEST_TABLE +ORDER BY device_id; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=6840 loops=1) + Sort Key: _hyper_2_4_chunk.device_id + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_17_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk._ts_meta_count + Heap Fetches: 1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=1080 loops=1) + Output: _hyper_2_5_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_18_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_18_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk._ts_meta_count + Heap Fetches: 3 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=360 loops=1) + Output: _hyper_2_6_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_19_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk._ts_meta_count + Heap Fetches: 1 + -> Index Only Scan using _hyper_2_7_chunk_tmp_idx on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk.device_id + Heap Fetches: 504 + -> Index Only Scan using 
_hyper_2_8_chunk_tmp_idx on _timescaledb_internal._hyper_2_8_chunk (actual rows=1512 loops=1) + Output: _hyper_2_8_chunk.device_id + Heap Fetches: 1512 + -> Index Only Scan using _hyper_2_9_chunk_tmp_idx on _timescaledb_internal._hyper_2_9_chunk (actual rows=504 loops=1) + Output: _hyper_2_9_chunk.device_id + Heap Fetches: 504 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_20_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk._ts_meta_count + Heap Fetches: 1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk.device_id + Bulk Decompression: false + -> Index Only Scan using compress_hyper_6_21_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk._ts_meta_count + Heap Fetches: 3 + -> Index Only Scan using _hyper_2_12_chunk_tmp_idx on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk.device_id + Heap Fetches: 504 +(44 rows) + +DROP INDEX tmp_idx CASCADE; +--use the peer index +:PREFIX_VERBOSE +SELECT * +FROM :TEST_TABLE +WHERE device_id_peer = 1 +ORDER BY device_id_peer, + time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_2_4_chunk."time" + -> Sort (actual rows=0 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Sort Key: _hyper_2_4_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=0 loops=1) + Output: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id, _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.v0, _hyper_2_4_chunk.v1, _hyper_2_4_chunk.v2, _hyper_2_4_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_6_17_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Index Cond: 
(compress_hyper_6_17_chunk.device_id_peer = 1) + -> Sort (actual rows=0 loops=1) + Output: _hyper_2_5_chunk."time", _hyper_2_5_chunk.device_id, _hyper_2_5_chunk.device_id_peer, _hyper_2_5_chunk.v0, _hyper_2_5_chunk.v1, _hyper_2_5_chunk.v2, _hyper_2_5_chunk.v3 + Sort Key: _hyper_2_5_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=0 loops=1) + Output: _hyper_2_5_chunk."time", _hyper_2_5_chunk.device_id, _hyper_2_5_chunk.device_id_peer, _hyper_2_5_chunk.v0, _hyper_2_5_chunk.v1, _hyper_2_5_chunk.v2, _hyper_2_5_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_6_18_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_18_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_18_chunk.device_id_peer = 1) + -> Sort (actual rows=0 loops=1) + Output: _hyper_2_6_chunk."time", _hyper_2_6_chunk.device_id, _hyper_2_6_chunk.device_id_peer, _hyper_2_6_chunk.v0, _hyper_2_6_chunk.v1, _hyper_2_6_chunk.v2, _hyper_2_6_chunk.v3 + Sort Key: _hyper_2_6_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=0 loops=1) + Output: _hyper_2_6_chunk."time", _hyper_2_6_chunk.device_id, _hyper_2_6_chunk.device_id_peer, _hyper_2_6_chunk.v0, _hyper_2_6_chunk.v1, _hyper_2_6_chunk.v2, _hyper_2_6_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_6_19_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_19_chunk.device_id_peer = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_7_chunk (actual rows=0 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk.device_id_peer = 1) + Rows Removed by Filter: 504 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_8_chunk (actual rows=0 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Filter: 
(_hyper_2_8_chunk.device_id_peer = 1) + Rows Removed by Filter: 1512 + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_9_chunk (actual rows=0 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Filter: (_hyper_2_9_chunk.device_id_peer = 1) + Rows Removed by Filter: 504 + -> Sort (actual rows=0 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Sort Key: _hyper_2_10_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=0 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_6_20_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_20_chunk.device_id_peer = 1) + -> Sort (actual rows=0 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Sort Key: _hyper_2_11_chunk."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=0 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Bulk Decompression: true + -> Index Scan using compress_hyper_6_21_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_21_chunk.device_id_peer = 1) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _timescaledb_internal._hyper_2_12_chunk (actual rows=0 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, 
_hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Filter: (_hyper_2_12_chunk.device_id_peer = 1) + Rows Removed by Filter: 504 +(68 rows) + +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id_peer = 1 +ORDER BY device_id_peer; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=0 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_6_17_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_17_chunk.device_id_peer = 1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=0 loops=1) + Output: _hyper_2_5_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_6_18_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_18_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_18_chunk.device_id_peer = 1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=0 loops=1) + Output: _hyper_2_6_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_6_19_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, 
compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_19_chunk.device_id_peer = 1) + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=0 loops=1) + Output: _hyper_2_7_chunk.device_id_peer + Index Cond: (_hyper_2_7_chunk.device_id_peer = 1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=0 loops=1) + Output: _hyper_2_8_chunk.device_id_peer + Index Cond: (_hyper_2_8_chunk.device_id_peer = 1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=0 loops=1) + Output: _hyper_2_9_chunk.device_id_peer + Index Cond: (_hyper_2_9_chunk.device_id_peer = 1) + Heap Fetches: 0 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=0 loops=1) + Output: _hyper_2_10_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_6_20_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_20_chunk.device_id_peer = 1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=0 loops=1) + Output: _hyper_2_11_chunk.device_id_peer + Bulk Decompression: false + -> Index Scan using compress_hyper_6_21_chunk__compressed_hypertable_6_device_id_pe on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_21_chunk.device_id_peer = 1) + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=0 loops=1) + Output: _hyper_2_12_chunk.device_id_peer + Index Cond: (_hyper_2_12_chunk.device_id_peer = 1) + Heap Fetches: 0 +(47 rows) + +--ensure that we can get a nested loop +SET enable_seqscan TO TRUE; +SET enable_hashjoin TO FALSE; +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id_peer IN ( + VALUES (1)); + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=0 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_17_chunk.device_id_peer = 1) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=0 loops=1) + Output: _hyper_2_5_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_18_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_18_chunk.device_id_peer = 1) + Rows Removed by Filter: 3 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=0 loops=1) + Output: _hyper_2_6_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_19_chunk.device_id_peer = 1) + Rows Removed by Filter: 1 + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on 
_timescaledb_internal._hyper_2_7_chunk (actual rows=0 loops=1) + Output: _hyper_2_7_chunk.device_id_peer + Index Cond: (_hyper_2_7_chunk.device_id_peer = 1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=0 loops=1) + Output: _hyper_2_8_chunk.device_id_peer + Index Cond: (_hyper_2_8_chunk.device_id_peer = 1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=0 loops=1) + Output: _hyper_2_9_chunk.device_id_peer + Index Cond: (_hyper_2_9_chunk.device_id_peer = 1) + Heap Fetches: 0 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=0 loops=1) + Output: _hyper_2_10_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk.device_id_peer = 1) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=0 loops=1) + Output: _hyper_2_11_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=0 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_21_chunk.device_id_peer = 1) + Rows Removed by Filter: 3 + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=0 loops=1) + Output: _hyper_2_12_chunk.device_id_peer + Index Cond: (_hyper_2_12_chunk.device_id_peer = 1) + Heap Fetches: 0 +(52 rows) + +--with multiple values can get a nested loop. 
+:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id_peer IN ( + VALUES (1), + (2)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Semi Join (actual rows=0 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Join Filter: (_hyper_2_4_chunk.device_id_peer = "*VALUES*".column1) + Rows Removed by Join Filter: 13680 + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=1080 loops=1) + Output: _hyper_2_5_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_18_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=360 loops=1) + Output: _hyper_2_6_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + -> Seq Scan on 
_timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk.device_id_peer + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1512 loops=1) + Output: _hyper_2_8_chunk.device_id_peer + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=504 loops=1) + Output: _hyper_2_9_chunk.device_id_peer + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk.device_id_peer + -> Materialize (actual rows=2 loops=6840) + Output: "*VALUES*".column1 + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 +(42 rows) + +RESET enable_hashjoin; +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, 
compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_17_chunk.device_id = 1) + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk.device_id_peer + Filter: (_hyper_2_7_chunk.device_id = 1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk.device_id = 1) +(16 rows) + +--with multiple values can get a semi-join or nested loop depending on seq_page_cost. +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1), + (2)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Hash Semi Join (actual rows=2736 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Hash Cond: (_hyper_2_4_chunk.device_id = "*VALUES*".column1) + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=1080 loops=1) + Output: 
_hyper_2_5_chunk.device_id_peer, _hyper_2_5_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_18_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=360 loops=1) + Output: _hyper_2_6_chunk.device_id_peer, _hyper_2_6_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.device_id + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1512 loops=1) + Output: _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.device_id + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=504 loops=1) + Output: _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.device_id + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, 
compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.device_id + -> Hash (actual rows=2 loops=1) + Output: "*VALUES*".column1 + Buckets: 1024 Batches: 1 + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 +(42 rows) + +SET seq_page_cost = 100; +-- loop/row counts of this query is different on windows so we run it without analyze +:PREFIX_NO_ANALYZE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1), + (2)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: _hyper_2_4_chunk.device_id_peer + -> Unique + Output: "*VALUES*".column1 + -> Sort + Output: "*VALUES*".column1 + Sort Key: "*VALUES*".column1 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 + -> Append + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.device_id + -> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_17_chunk + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_17_chunk.device_id = "*VALUES*".column1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk + Output: _hyper_2_5_chunk.device_id_peer, _hyper_2_5_chunk.device_id + -> Index Scan using compress_hyper_6_18_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_18_chunk + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_18_chunk.device_id = 
"*VALUES*".column1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk + Output: _hyper_2_6_chunk.device_id_peer, _hyper_2_6_chunk.device_id + -> Index Scan using compress_hyper_6_19_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_19_chunk + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_19_chunk.device_id = "*VALUES*".column1) + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk + Output: _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.device_id + Index Cond: (_hyper_2_7_chunk.device_id = "*VALUES*".column1) + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk + Output: _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.device_id + Index Cond: (_hyper_2_8_chunk.device_id = "*VALUES*".column1) + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk + Output: _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.device_id + Index Cond: (_hyper_2_9_chunk.device_id = "*VALUES*".column1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk + Output: _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.device_id + -> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_20_chunk + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Index Cond: (compress_hyper_6_20_chunk.device_id = "*VALUES*".column1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk + Output: _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.device_id + -> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on _timescaledb_internal.compress_hyper_6_21_chunk + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + Index Cond: 
(compress_hyper_6_21_chunk.device_id = "*VALUES*".column1) + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk + Output: _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.device_id + Index Cond: (_hyper_2_12_chunk.device_id = "*VALUES*".column1) +(47 rows) + +RESET seq_page_cost; +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=1368 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_17_chunk.device_id = 1) + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk.device_id_peer + Filter: (_hyper_2_7_chunk.device_id = 1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id_peer + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + Filter: (compress_hyper_6_20_chunk.device_id = 1) +(16 rows) + +:PREFIX_VERBOSE +SELECT device_id_peer +FROM :TEST_TABLE +WHERE device_id IN ( + VALUES (1), + (2)); + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Hash Semi Join (actual rows=2736 loops=1) + Output: _hyper_2_4_chunk.device_id_peer + Hash Cond: (_hyper_2_4_chunk.device_id = "*VALUES*".column1) + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + Output: _hyper_2_4_chunk.device_id_peer, _hyper_2_4_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3, compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_5_chunk (actual rows=1080 loops=1) + Output: _hyper_2_5_chunk.device_id_peer, _hyper_2_5_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_18_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_18_chunk."time", compress_hyper_6_18_chunk.device_id, compress_hyper_6_18_chunk.device_id_peer, compress_hyper_6_18_chunk.v0, compress_hyper_6_18_chunk.v1, compress_hyper_6_18_chunk.v2, compress_hyper_6_18_chunk.v3, compress_hyper_6_18_chunk._ts_meta_count, compress_hyper_6_18_chunk._ts_meta_sequence_num, compress_hyper_6_18_chunk._ts_meta_min_3, compress_hyper_6_18_chunk._ts_meta_max_3, compress_hyper_6_18_chunk._ts_meta_min_1, compress_hyper_6_18_chunk._ts_meta_max_1, compress_hyper_6_18_chunk._ts_meta_min_2, compress_hyper_6_18_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_6_chunk (actual rows=360 loops=1) + Output: _hyper_2_6_chunk.device_id_peer, _hyper_2_6_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_19_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_19_chunk."time", compress_hyper_6_19_chunk.device_id, compress_hyper_6_19_chunk.device_id_peer, compress_hyper_6_19_chunk.v0, compress_hyper_6_19_chunk.v1, compress_hyper_6_19_chunk.v2, compress_hyper_6_19_chunk.v3, compress_hyper_6_19_chunk._ts_meta_count, compress_hyper_6_19_chunk._ts_meta_sequence_num, compress_hyper_6_19_chunk._ts_meta_min_3, compress_hyper_6_19_chunk._ts_meta_max_3, compress_hyper_6_19_chunk._ts_meta_min_1, compress_hyper_6_19_chunk._ts_meta_max_1, compress_hyper_6_19_chunk._ts_meta_min_2, compress_hyper_6_19_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) + Output: _hyper_2_7_chunk.device_id_peer, 
_hyper_2_7_chunk.device_id + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1512 loops=1) + Output: _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.device_id + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=504 loops=1) + Output: _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.device_id + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + Output: _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) + Output: compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3, compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1512 loops=1) + Output: _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.device_id + Bulk Decompression: false + -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) + Output: compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3, compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk._ts_meta_sequence_num, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2 + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=504 loops=1) + Output: _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.device_id + -> Hash (actual rows=2 loops=1) + Output: "*VALUES*".column1 + Buckets: 1024 Batches: 1 + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 +(42 rows) + +-- test view +CREATE OR REPLACE VIEW compressed_view AS +SELECT time, + device_id, + v1, + v2 +FROM :TEST_TABLE; +:PREFIX +SELECT * +FROM compressed_view +WHERE device_id = 1 +ORDER BY time DESC +LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1) + Order: metrics_space."time" DESC + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_2_10_chunk."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: _hyper_2_4_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) + -> Seq Scan on compress_hyper_6_17_chunk (never executed) + Filter: (device_id = 1) +(16 rows) + +DROP 
VIEW compressed_view; +-- test INNER JOIN +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = m2.device_id + ORDER BY m1.time, + m1.device_id + LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = m2.device_id) + Rows Removed by Join Filter: 40 + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=11 loops=1) + Order: m1."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: m1_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m1_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m1_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m1_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m1_6 (never executed) + -> Merge Append (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: m1_8."time" + -> Sort (never executed) + Sort Key: m1_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m1_9 (never executed) + -> Materialize (actual rows=51 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=11 loops=1) + Order: m2."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk compress_hyper_6_17_chunk_1 (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual 
rows=1080 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk compress_hyper_6_18_chunk_1 (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk compress_hyper_6_19_chunk_1 (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk compress_hyper_6_20_chunk_1 (never executed) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk compress_hyper_6_21_chunk_1 (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) +(106 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + INNER JOIN :TEST_TABLE m3 ON m2.time = m3.time + AND m1.device_id = m2.device_id + AND m3.device_id = 3 + ORDER BY m1.time, + m1.device_id + LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m3_1."time") + -> Merge Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = m2.device_id) + Rows Removed by Join Filter: 40 + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=11 loops=1) + Order: m1."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: m1_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Custom Scan 
(DecompressChunk) on _hyper_2_6_chunk m1_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m1_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m1_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m1_6 (never executed) + -> Merge Append (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: m1_8."time" + -> Sort (never executed) + Sort Key: m1_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m1_9 (never executed) + -> Materialize (actual rows=51 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=11 loops=1) + Order: m2."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk compress_hyper_6_17_chunk_1 (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk compress_hyper_6_18_chunk_1 (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk compress_hyper_6_19_chunk_1 (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk compress_hyper_6_20_chunk_1 (never executed) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk compress_hyper_6_21_chunk_1 (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) + -> 
Materialize (actual rows=11 loops=1) + -> Merge Append (actual rows=3 loops=1) + Sort Key: m3_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m3_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m3_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk compress_hyper_6_19_chunk_2 (actual rows=1 loops=1) + Filter: (device_id = 3) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m3_2 (actual rows=1 loops=1) + Filter: (device_id = 3) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m3_3 (actual rows=1 loops=1) + Filter: (device_id = 3) +(121 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 + ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id + LIMIT 100; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=100 loops=1) + Order: m1."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_2 (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=100 loops=1) + Order: m2."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk m2_2 (never executed) + Index Cond: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id = 2) +(35 rows) + +:PREFIX +SELECT * +FROM metrics m1 + INNER JOIN metrics_space m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 + ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id + LIMIT 100; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=100 loops=1) + Order: m1."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows 
Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=100 loops=1) + Order: m2."time" + -> Sort (actual rows=100 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2 + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk m2_2 (never executed) + Index Cond: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_3."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_3 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id = 2) +(36 rows) + +-- test OUTER JOIN +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = m2.device_id +ORDER BY m1.time, + m1.device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Left Join (actual rows=11 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = m2.device_id) + Rows Removed by Join Filter: 40 + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=11 loops=1) + Order: m1."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: m1_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m1_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m1_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m1_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m1_6 (never executed) + -> Merge Append (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort 
Key: m1_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: m1_8."time" + -> Sort (never executed) + Sort Key: m1_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m1_9 (never executed) + -> Materialize (actual rows=51 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=11 loops=1) + Order: m2."time" + -> Merge Append (actual rows=11 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=3 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk compress_hyper_6_17_chunk_1 (actual rows=1 loops=1) + -> Sort (actual rows=7 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk compress_hyper_6_18_chunk_1 (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk compress_hyper_6_19_chunk_1 (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk compress_hyper_6_20_chunk_1 (never executed) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk compress_hyper_6_21_chunk_1 (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) +(106 rows) + +:PREFIX +SELECT * +FROM :TEST_TABLE m1 + LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 100; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Incremental Sort (actual rows=100 loops=1) + Sort Key: m1."time", m1.device_id, m2."time", m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 3 Sort Method: quicksort + -> Merge Left Join (actual rows=101 loops=1) + Merge Cond: (m1."time" = 
m2."time") + Join Filter: (m1.device_id = 1) + Rows Removed by Join Filter: 81 + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=101 loops=1) + Order: m1."time" + -> Merge Append (actual rows=101 loops=1) + Sort Key: m1_1."time" + -> Sort (actual rows=21 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=61 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=21 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m1_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m1_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m1_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m1_6 (never executed) + -> Merge Append (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: m1_8."time" + -> Sort (never executed) + Sort Key: m1_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m1_9 (never executed) + -> Materialize (actual rows=102 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=22 loops=1) + Order: m2."time" + -> Merge Append (actual rows=22 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=0 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk compress_hyper_6_17_chunk_1 (actual rows=0 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Sort (actual rows=22 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_2 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk compress_hyper_6_18_chunk_1 (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2 + -> Sort (actual rows=0 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=0 loops=1) + -> Seq Scan on 
compress_hyper_6_19_chunk compress_hyper_6_19_chunk_1 (actual rows=0 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + Filter: (device_id = 2) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk compress_hyper_6_20_chunk_1 (never executed) + Filter: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk compress_hyper_6_21_chunk_1 (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) + Filter: (device_id = 2) +(118 rows) + +:PREFIX +SELECT * +FROM metrics m1 + LEFT OUTER JOIN metrics_space m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 100; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=100 loops=1) + -> Incremental Sort (actual rows=100 loops=1) + Sort Key: m1."time", m1.device_id, m2."time", m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 3 Sort Method: quicksort + -> Merge Left Join (actual rows=101 loops=1) + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = 1) + Rows Removed by Join Filter: 81 + -> Custom Scan (ChunkAppend) on metrics m1 (actual rows=101 loops=1) + Order: m1."time" + -> Sort (actual rows=101 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m1_1 (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk m1_2 (never executed) + -> Sort (never executed) + Sort Key: m1_3."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m1_3 (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + -> Materialize (actual rows=102 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=22 loops=1) + Order: m2."time" + -> Merge Append (actual rows=22 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=0 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=0 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Sort (actual rows=22 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) 
on _hyper_2_5_chunk m2_2 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 2 + -> Sort (actual rows=0 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=0 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=0 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + Filter: (device_id = 2) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + Filter: (device_id = 2) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) + Filter: (device_id = 2) +(82 rows) + +-- test implicit self-join +:PREFIX +SELECT * +FROM :TEST_TABLE m1, + :TEST_TABLE m2 +WHERE m1.time = m2.time +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 20; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=20 loops=1) + -> Incremental Sort (actual rows=20 loops=1) + Sort Key: m1."time", m1.device_id, m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: quicksort + -> Merge Join (actual rows=26 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=6 loops=1) + Order: m1."time" + -> Merge Append (actual rows=6 loops=1) + Sort Key: m1_1."time" + -> Sort (actual rows=2 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m1_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + 
Sort Key: m1_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m1_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m1_6 (never executed) + -> Merge Append (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: m1_8."time" + -> Sort (never executed) + Sort Key: m1_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m1_9 (never executed) + -> Materialize (actual rows=26 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=6 loops=1) + Order: m2."time" + -> Merge Append (actual rows=6 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=2 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk compress_hyper_6_17_chunk_1 (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk compress_hyper_6_18_chunk_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk compress_hyper_6_19_chunk_1 (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk compress_hyper_6_20_chunk_1 (never executed) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk compress_hyper_6_21_chunk_1 (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) +(104 rows) + +-- test self-join with sub-query +:PREFIX +SELECT * +FROM ( + SELECT * + FROM :TEST_TABLE m1) m1 + INNER JOIN ( + SELECT * + FROM :TEST_TABLE m2) m2 ON m1.time = 
m2.time +ORDER BY m1.time, + m1.device_id, + m2.device_id +LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Incremental Sort (actual rows=10 loops=1) + Sort Key: m1."time", m1.device_id, m2.device_id + Presorted Key: m1."time" + Full-sort Groups: 1 Sort Method: top-N heapsort + -> Merge Join (actual rows=26 loops=1) + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=6 loops=1) + Order: m1."time" + -> Merge Append (actual rows=6 loops=1) + Sort Key: m1_1."time" + -> Sort (actual rows=2 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m1_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m1_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m1_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m1_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m1_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m1_6 (never executed) + -> Merge Append (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Sort (never executed) + Sort Key: m1_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk (never executed) + -> Sort (never executed) + Sort Key: m1_8."time" + -> Sort (never executed) + Sort Key: m1_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m1_9 (never executed) + -> Materialize (actual rows=26 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space m2 (actual rows=6 loops=1) + Order: m2."time" + -> Merge Append (actual rows=6 loops=1) + Sort Key: m2_1."time" + -> Sort (actual rows=2 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_1."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m2_1 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_17_chunk compress_hyper_6_17_chunk_1 (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Sort (actual rows=1080 loops=1) + Sort Key: m2_2."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m2_2 (actual rows=1080 loops=1) + -> Seq Scan on compress_hyper_6_18_chunk 
compress_hyper_6_18_chunk_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Sort (actual rows=360 loops=1) + Sort Key: m2_3."time" + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m2_3 (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_6_19_chunk compress_hyper_6_19_chunk_1 (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: m2_4."time" + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m2_4 (never executed) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m2_5 (never executed) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m2_6 (never executed) + -> Merge Append (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Sort (never executed) + Sort Key: m2_7."time" + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m2_7 (never executed) + -> Seq Scan on compress_hyper_6_20_chunk compress_hyper_6_20_chunk_1 (never executed) + -> Sort (never executed) + Sort Key: m2_8."time" + -> Sort (never executed) + Sort Key: m2_8."time" + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m2_8 (never executed) + -> Seq Scan on compress_hyper_6_21_chunk compress_hyper_6_21_chunk_1 (never executed) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m2_9 (never executed) +(104 rows) + +:PREFIX +SELECT * +FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time) + INNER JOIN LATERAL ( + SELECT time + FROM :TEST_TABLE m1 + WHERE m1.time = g.time + LIMIT 1) m1 ON TRUE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=1 loops=32) + Chunks excluded during runtime: 7 + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=1 loops=5) + Filter: ("time" = g."time") + Rows Removed by Filter: 168 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=5) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time")) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_2 (never executed) + Filter: ("time" = g."time") + -> Seq Scan on compress_hyper_6_18_chunk (never executed) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time")) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk m1_3 (never executed) + Filter: ("time" = g."time") + -> Seq Scan on compress_hyper_6_19_chunk (never executed) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time")) + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk m1_4 (actual rows=1 loops=7) + Index Cond: ("time" = g."time") + Heap Fetches: 7 + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk m1_5 (never executed) + Index Cond: ("time" = g."time") + Heap Fetches: 0 + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk m1_6 (never executed) + Index Cond: ("time" = g."time") + Heap Fetches: 0 + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_7 (actual rows=1 loops=7) + Filter: ("time" = g."time") + Rows Removed by Filter: 240 + -> 
Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=7) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time")) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_8 (never executed) + Filter: ("time" = g."time") + -> Seq Scan on compress_hyper_6_21_chunk (never executed) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time")) + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk m1_9 (never executed) + Index Cond: ("time" = g."time") + Heap Fetches: 0 +(39 rows) + +-- test prepared statement with params pushdown +PREPARE param_prep (int) AS +SELECT * +FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time) + INNER JOIN LATERAL ( + SELECT time + FROM :TEST_TABLE m1 + WHERE m1.time = g.time + AND device_id = $1 + LIMIT 1) m1 ON TRUE; +:PREFIX EXECUTE param_prep (1); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=1 loops=32) + Chunks excluded during runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk m1_1 (actual rows=1 loops=5) + Filter: ("time" = g."time") + Rows Removed by Filter: 168 + -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=5) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 1)) + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_time_idx on _hyper_2_7_chunk m1_2 (actual rows=1 loops=7) + Index Cond: ((device_id = 1) AND ("time" = g."time")) + Heap Fetches: 7 + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk m1_3 (actual rows=1 loops=7) + Filter: ("time" = g."time") + Rows Removed by Filter: 240 + -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=7) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 1)) +(18 rows) + +:PREFIX EXECUTE param_prep (2); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=19 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=1 loops=32) + -> Custom Scan (ChunkAppend) on metrics_space m1 (actual rows=1 loops=32) + Chunks excluded during runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk m1_1 (actual rows=1 loops=5) + Filter: ("time" = g."time") + Rows Removed by Filter: 168 + -> Seq Scan on compress_hyper_6_18_chunk (actual rows=1 loops=5) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 2)) + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk m1_2 (actual rows=1 loops=7) + Index Cond: ((device_id = 2) AND ("time" = g."time")) + Heap Fetches: 7 + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk m1_3 (actual rows=1 loops=7) + Filter: ("time" = g."time") + Rows Removed by Filter: 240 + -> Seq Scan on compress_hyper_6_21_chunk (actual rows=1 loops=7) + Filter: ((_ts_meta_min_3 <= g."time") AND (_ts_meta_max_3 >= g."time") AND (device_id = 2)) +(18 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 
2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (2); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 
PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (2); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST | Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST | Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST | Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST | Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST | Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST | Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST | Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST | Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST | Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST | Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST | Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST | Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST | Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST | Wed Jan 19 00:00:00 2000 PST +(19 rows) + +DEALLOCATE param_prep; +-- test continuous aggs +SET client_min_messages TO error; +CREATE MATERIALIZED VIEW cagg_test WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) AS +SELECT time_bucket ('1d', time) AS time, + device_id, + avg(v1) +FROM :TEST_TABLE +WHERE device_id = 1 +GROUP BY 1, + 2 WITH DATA; +SELECT time +FROM cagg_test +ORDER BY time +LIMIT 1; + time +------------------------------ + Fri Dec 31 16:00:00 1999 PST +(1 row) + +DROP MATERIALIZED VIEW cagg_test; +RESET client_min_messages; +--github issue 1558. 
nested loop with index scan needed +--disables parallel scan +SET enable_seqscan = FALSE; +SET enable_bitmapscan = FALSE; +SET max_parallel_workers_per_gather = 0; +SET enable_hashjoin = FALSE; +SET enable_mergejoin = FALSE; +:PREFIX +SELECT * +FROM metrics, + metrics_space +WHERE metrics.time > metrics_space.time + AND metrics.device_id = metrics_space.device_id + AND metrics.time < metrics_space.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop (actual rows=0 loops=1) + -> Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Index Scan using compress_hyper_6_18_chunk_c_space_index_2 on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Index Scan using compress_hyper_6_19_chunk_c_space_index_2 on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=504 loops=1) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=1512 loops=1) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=504 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=504 loops=1) + -> Append (actual rows=0 loops=6840) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=6840) + Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time")) + Rows Removed by Filter: 360 + -> Index Scan using compress_hyper_5_15_chunk_c_index_2 on compress_hyper_5_15_chunk (actual rows=1 loops=6840) + Index Cond: (device_id = _hyper_2_4_chunk.device_id) + -> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=6840) + Index Cond: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time")) + Filter: (_hyper_2_4_chunk.device_id = device_id) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=6840) + Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time")) + Rows Removed by Filter: 504 + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk (actual rows=1 loops=6840) + Index Cond: (device_id = _hyper_2_4_chunk.device_id) +(30 rows) + +SET enable_seqscan = TRUE; +SET enable_bitmapscan = TRUE; +SET max_parallel_workers_per_gather = 0; +SET enable_hashjoin = TRUE; +SET enable_mergejoin = TRUE; +---end github issue 1558 +\ir include/transparent_decompression_ordered.sql +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +SET work_mem TO '50MB'; +-- for ordered append tests on compressed chunks we need a hypertable with time as compress_orderby column +CREATE TABLE metrics_ordered(time timestamptz NOT NULL, device_id int, device_id_peer int, v0 int, v1 int, v2 float, v3 float); +SELECT create_hypertable('metrics_ordered','time'); + create_hypertable +------------------------------- + (11,public,metrics_ordered,t) +(1 row) + +ALTER TABLE metrics_ordered SET (timescaledb.compress, timescaledb.compress_orderby='time DESC',timescaledb.compress_segmentby='device_id,device_id_peer'); +INSERT INTO metrics_ordered SELECT * FROM metrics; +CREATE INDEX ON metrics_ordered(device_id,device_id_peer,time); +CREATE INDEX ON metrics_ordered(device_id,time); +CREATE INDEX ON metrics_ordered(device_id_peer,time); +-- compress all chunks +SELECT + compress_chunk(c.schema_name || '.' || c.table_name) +FROM _timescaledb_catalog.chunk c + INNER JOIN _timescaledb_catalog.hypertable ht ON c.hypertable_id=ht.id +WHERE ht.table_name = 'metrics_ordered' +ORDER BY c.id; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_11_26_chunk + _timescaledb_internal._hyper_11_27_chunk + _timescaledb_internal._hyper_11_28_chunk +(3 rows) + +-- reindexing compressed hypertable to update statistics +DO +$$ +DECLARE + hyper_id int; +BEGIN + SELECT h.compressed_hypertable_id + INTO hyper_id + FROM _timescaledb_catalog.hypertable h + WHERE h.table_name = 'metrics_ordered'; + EXECUTE format('REINDEX TABLE _timescaledb_internal._compressed_hypertable_%s', + hyper_id); +END; +$$; +-- should not have ordered DecompressChunk path because segmentby columns are not part of pathkeys +:PREFIX SELECT * FROM metrics_ordered ORDER BY time DESC LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------ + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_ordered (actual rows=10 loops=1) + Order: metrics_ordered."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=10 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_12_31_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_12_31_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_12_30_chunk._ts_meta_max_1 DESC + -> Seq Scan on compress_hyper_12_30_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_12_29_chunk._ts_meta_max_1 DESC + -> Seq Scan on compress_hyper_12_29_chunk (never executed) +(16 rows) + +-- should have ordered DecompressChunk path because segmentby columns have equality constraints +:PREFIX SELECT * FROM metrics_ordered WHERE device_id = 1 AND device_id_peer = 3 ORDER BY time DESC LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------------------------- + Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on metrics_ordered (actual rows=0 loops=1) + Order: metrics_ordered."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: compress_hyper_12_31_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_12_31_chunk (actual rows=0 
loops=1) + Filter: ((device_id = 1) AND (device_id_peer = 3)) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: compress_hyper_12_30_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_12_30_chunk (actual rows=0 loops=1) + Filter: ((device_id = 1) AND (device_id_peer = 3)) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: compress_hyper_12_29_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_12_29_chunk (actual rows=0 loops=1) + Filter: ((device_id = 1) AND (device_id_peer = 3)) + Rows Removed by Filter: 5 +(24 rows) + +:PREFIX SELECT DISTINCT ON (d.device_id) * FROM metrics_ordered d INNER JOIN LATERAL (SELECT * FROM metrics_ordered m WHERE m.device_id=d.device_id AND m.device_id_peer = 3 ORDER BY time DESC LIMIT 1 ) m ON true; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------- + Unique (actual rows=0 loops=1) + -> Nested Loop (actual rows=0 loops=1) + -> Merge Append (actual rows=6840 loops=1) + Sort Key: d_1.device_id + -> Custom Scan (DecompressChunk) on _hyper_11_26_chunk d_1 (actual rows=1800 loops=1) + -> Index Scan using compress_hyper_12_29_chunk__compressed_hypertable_12_device_id_ on compress_hyper_12_29_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_11_27_chunk d_2 (actual rows=2520 loops=1) + -> Index Scan using compress_hyper_12_30_chunk__compressed_hypertable_12_device_id_ on compress_hyper_12_30_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_11_28_chunk d_3 (actual rows=2520 loops=1) + -> Index Scan using compress_hyper_12_31_chunk__compressed_hypertable_12_device_id_ on compress_hyper_12_31_chunk (actual rows=5 loops=1) + -> Limit (actual rows=0 loops=6840) + -> Custom Scan (ChunkAppend) on metrics_ordered m (actual rows=0 loops=6840) + Order: m."time" DESC + Hypertables excluded during runtime: 0 + -> Custom Scan (DecompressChunk) on _hyper_11_28_chunk m_1 (actual rows=0 loops=6840) + -> Sort (actual rows=0 loops=6840) + Sort Key: compress_hyper_12_31_chunk_1._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_12_31_chunk compress_hyper_12_31_chunk_1 (actual rows=0 loops=6840) + Filter: ((device_id = d_1.device_id) AND (device_id_peer = 3)) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_11_27_chunk m_2 (actual rows=0 loops=6840) + -> Sort (actual rows=0 loops=6840) + Sort Key: compress_hyper_12_30_chunk_1._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_12_30_chunk compress_hyper_12_30_chunk_1 (actual rows=0 loops=6840) + Filter: ((device_id = d_1.device_id) AND (device_id_peer = 3)) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_11_26_chunk m_3 (actual rows=0 loops=6840) + -> Sort (actual rows=0 loops=6840) + Sort Key: compress_hyper_12_29_chunk_1._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_12_29_chunk compress_hyper_12_29_chunk_1 (actual rows=0 loops=6840) + Filter: ((device_id = d_1.device_id) AND (device_id_peer = 3)) + Rows Removed by Filter: 5 +(35 rows) + +\ir include/transparent_decompression_systemcolumns.sql +-- This file and its contents are licensed under the Timescale 
License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set TEST_TABLE 'metrics' +-- test system columns +-- all system columns except for tableoid should error +\set ON_ERROR_STOP 0 +SELECT xmin FROM :TEST_TABLE ORDER BY time; +psql:include/transparent_decompression_systemcolumns.sql:10: ERROR: transparent decompression only supports tableoid system column +SELECT cmin FROM :TEST_TABLE ORDER BY time; +psql:include/transparent_decompression_systemcolumns.sql:11: ERROR: transparent decompression only supports tableoid system column +SELECT xmax FROM :TEST_TABLE ORDER BY time; +psql:include/transparent_decompression_systemcolumns.sql:12: ERROR: transparent decompression only supports tableoid system column +SELECT cmax FROM :TEST_TABLE ORDER BY time; +psql:include/transparent_decompression_systemcolumns.sql:13: ERROR: transparent decompression only supports tableoid system column +SELECT ctid FROM :TEST_TABLE ORDER BY time; +psql:include/transparent_decompression_systemcolumns.sql:14: ERROR: transparent decompression only supports tableoid system column +-- test system columns in WHERE and ORDER BY clause +SELECT tableoid, xmin FROM :TEST_TABLE ORDER BY time; +psql:include/transparent_decompression_systemcolumns.sql:17: ERROR: transparent decompression only supports tableoid system column +SELECT FROM :TEST_TABLE ORDER BY cmin::text; +psql:include/transparent_decompression_systemcolumns.sql:18: ERROR: transparent decompression only supports tableoid system column +SELECT FROM :TEST_TABLE WHERE cmin IS NOT NULL; +psql:include/transparent_decompression_systemcolumns.sql:19: ERROR: transparent decompression only supports tableoid system column +\set ON_ERROR_STOP 1 +-- test tableoid in different parts of query +SELECT pg_typeof(tableoid) FROM :TEST_TABLE ORDER BY time LIMIT 1; + pg_typeof +----------- + oid +(1 row) + +SELECT FROM :TEST_TABLE ORDER BY tableoid LIMIT 1; +-- +(1 row) + +SELECT FROM :TEST_TABLE WHERE tableoid::int > 0 LIMIT 1; +-- +(1 row) + +SELECT tableoid::regclass FROM :TEST_TABLE GROUP BY tableoid ORDER BY 1; + tableoid +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk +(3 rows) + +SELECT count(distinct tableoid) FROM :TEST_TABLE WHERE device_id=1 AND time < now(); + count +------- + 3 +(1 row) + +-- test prepared statement +PREPARE tableoid_prep AS SELECT tableoid::regclass FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time LIMIT 1; +:PREFIX EXECUTE tableoid_prep; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=360 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + Rows Removed by Filter: 4 + -> Index Scan Backward using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed) + Filter: (device_id = 1) + -> Sort (never executed) + Sort Key: _hyper_1_3_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Seq Scan on compress_hyper_5_16_chunk (never executed) + Filter: (device_id = 1) +(18 
rows) + +EXECUTE tableoid_prep; + tableoid +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +EXECUTE tableoid_prep; + tableoid +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +EXECUTE tableoid_prep; + tableoid +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +EXECUTE tableoid_prep; + tableoid +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +EXECUTE tableoid_prep; + tableoid +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +DEALLOCATE tableoid_prep; +\ir include/transparent_decompression_undiffed.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- test planning regression with many chunks +CREATE TABLE tags(id SERIAL PRIMARY KEY, name TEXT, fleet TEXT); +INSERT INTO tags (name, fleet) VALUES('n1', 'f1'); +CREATE TABLE readings (time timestamptz, tags_id integer, fuel_consumption DOUBLE PRECISION); +CREATE INDEX ON readings(tags_id, "time" DESC); +CREATE INDEX ON readings("time" DESC); +SELECT create_hypertable('readings', 'time', partitioning_column => 'tags_id', number_partitions => 1, chunk_time_interval => 43200000000, create_default_indexes=>false); +psql:include/transparent_decompression_undiffed.sql:12: NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------ + (13,public,readings,t) +(1 row) + +ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'tags_id', timescaledb.compress_orderby = 'time desc'); +INSERT into readings select g, 1, 1.3 from generate_series('2001-03-01 01:01:01', '2003-02-01 01:01:01', '1 day'::interval) g; +SELECT count(compress_chunk(chunk.schema_name|| '.' 
|| chunk.table_name)) +FROM _timescaledb_catalog.chunk chunk +INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id) +WHERE hypertable.table_name = 'readings' and chunk.compressed_chunk_id IS NULL; + count +------- + 703 +(1 row) + +EXPLAIN (costs off) SELECT t.fleet as fleet, min(r.fuel_consumption) AS avg_fuel_consumption +FROM tags t +INNER JOIN LATERAL(SELECT tags_id, fuel_consumption FROM readings r WHERE r.tags_id = t.id ) r ON true +GROUP BY fleet; + QUERY PLAN +------------------------------------------------------------------------------ + HashAggregate + Group Key: t.fleet + -> Hash Join + Hash Cond: (r_1.tags_id = t.id) + -> Append + -> Custom Scan (DecompressChunk) on _hyper_13_32_chunk r_1 + -> Seq Scan on compress_hyper_14_735_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_33_chunk r_2 + -> Seq Scan on compress_hyper_14_736_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_34_chunk r_3 + -> Seq Scan on compress_hyper_14_737_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_35_chunk r_4 + -> Seq Scan on compress_hyper_14_738_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_36_chunk r_5 + -> Seq Scan on compress_hyper_14_739_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_37_chunk r_6 + -> Seq Scan on compress_hyper_14_740_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_38_chunk r_7 + -> Seq Scan on compress_hyper_14_741_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_39_chunk r_8 + -> Seq Scan on compress_hyper_14_742_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_40_chunk r_9 + -> Seq Scan on compress_hyper_14_743_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_41_chunk r_10 + -> Seq Scan on compress_hyper_14_744_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_42_chunk r_11 + -> Seq Scan on compress_hyper_14_745_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_43_chunk r_12 + -> Seq Scan on compress_hyper_14_746_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_44_chunk r_13 + -> Seq Scan on compress_hyper_14_747_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_45_chunk r_14 + -> Seq Scan on compress_hyper_14_748_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_46_chunk r_15 + -> Seq Scan on compress_hyper_14_749_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_47_chunk r_16 + -> Seq Scan on compress_hyper_14_750_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_48_chunk r_17 + -> Seq Scan on compress_hyper_14_751_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_49_chunk r_18 + -> Seq Scan on compress_hyper_14_752_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_50_chunk r_19 + -> Seq Scan on compress_hyper_14_753_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_51_chunk r_20 + -> Seq Scan on compress_hyper_14_754_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_52_chunk r_21 + -> Seq Scan on compress_hyper_14_755_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_53_chunk r_22 + -> Seq Scan on compress_hyper_14_756_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_54_chunk r_23 + -> Seq Scan on compress_hyper_14_757_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_55_chunk r_24 + -> Seq Scan on compress_hyper_14_758_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_56_chunk r_25 + -> Seq Scan on compress_hyper_14_759_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_57_chunk r_26 + -> Seq Scan on compress_hyper_14_760_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_58_chunk r_27 + -> Seq Scan on 
compress_hyper_14_761_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_59_chunk r_28 + -> Seq Scan on compress_hyper_14_762_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_60_chunk r_29 + -> Seq Scan on compress_hyper_14_763_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_61_chunk r_30 + -> Seq Scan on compress_hyper_14_764_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_62_chunk r_31 + -> Seq Scan on compress_hyper_14_765_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_63_chunk r_32 + -> Seq Scan on compress_hyper_14_766_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_64_chunk r_33 + -> Seq Scan on compress_hyper_14_767_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_65_chunk r_34 + -> Seq Scan on compress_hyper_14_768_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_66_chunk r_35 + -> Seq Scan on compress_hyper_14_769_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_67_chunk r_36 + -> Seq Scan on compress_hyper_14_770_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_68_chunk r_37 + -> Seq Scan on compress_hyper_14_771_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_69_chunk r_38 + -> Seq Scan on compress_hyper_14_772_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_70_chunk r_39 + -> Seq Scan on compress_hyper_14_773_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_71_chunk r_40 + -> Seq Scan on compress_hyper_14_774_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_72_chunk r_41 + -> Seq Scan on compress_hyper_14_775_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_73_chunk r_42 + -> Seq Scan on compress_hyper_14_776_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_74_chunk r_43 + -> Seq Scan on compress_hyper_14_777_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_75_chunk r_44 + -> Seq Scan on compress_hyper_14_778_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_76_chunk r_45 + -> Seq Scan on compress_hyper_14_779_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_77_chunk r_46 + -> Seq Scan on compress_hyper_14_780_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_78_chunk r_47 + -> Seq Scan on compress_hyper_14_781_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_79_chunk r_48 + -> Seq Scan on compress_hyper_14_782_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_80_chunk r_49 + -> Seq Scan on compress_hyper_14_783_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_81_chunk r_50 + -> Seq Scan on compress_hyper_14_784_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_82_chunk r_51 + -> Seq Scan on compress_hyper_14_785_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_83_chunk r_52 + -> Seq Scan on compress_hyper_14_786_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_84_chunk r_53 + -> Seq Scan on compress_hyper_14_787_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_85_chunk r_54 + -> Seq Scan on compress_hyper_14_788_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_86_chunk r_55 + -> Seq Scan on compress_hyper_14_789_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_87_chunk r_56 + -> Seq Scan on compress_hyper_14_790_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_88_chunk r_57 + -> Seq Scan on compress_hyper_14_791_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_89_chunk r_58 + -> Seq Scan on compress_hyper_14_792_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_90_chunk r_59 + -> Seq Scan on compress_hyper_14_793_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_91_chunk r_60 + -> Seq Scan on 
compress_hyper_14_794_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_92_chunk r_61 + -> Seq Scan on compress_hyper_14_795_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_93_chunk r_62 + -> Seq Scan on compress_hyper_14_796_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_94_chunk r_63 + -> Seq Scan on compress_hyper_14_797_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_95_chunk r_64 + -> Seq Scan on compress_hyper_14_798_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_96_chunk r_65 + -> Seq Scan on compress_hyper_14_799_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_97_chunk r_66 + -> Seq Scan on compress_hyper_14_800_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_98_chunk r_67 + -> Seq Scan on compress_hyper_14_801_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_99_chunk r_68 + -> Seq Scan on compress_hyper_14_802_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_100_chunk r_69 + -> Seq Scan on compress_hyper_14_803_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_101_chunk r_70 + -> Seq Scan on compress_hyper_14_804_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_102_chunk r_71 + -> Seq Scan on compress_hyper_14_805_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_103_chunk r_72 + -> Seq Scan on compress_hyper_14_806_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_104_chunk r_73 + -> Seq Scan on compress_hyper_14_807_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_105_chunk r_74 + -> Seq Scan on compress_hyper_14_808_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_106_chunk r_75 + -> Seq Scan on compress_hyper_14_809_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_107_chunk r_76 + -> Seq Scan on compress_hyper_14_810_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_108_chunk r_77 + -> Seq Scan on compress_hyper_14_811_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_109_chunk r_78 + -> Seq Scan on compress_hyper_14_812_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_110_chunk r_79 + -> Seq Scan on compress_hyper_14_813_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_111_chunk r_80 + -> Seq Scan on compress_hyper_14_814_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_112_chunk r_81 + -> Seq Scan on compress_hyper_14_815_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_113_chunk r_82 + -> Seq Scan on compress_hyper_14_816_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_114_chunk r_83 + -> Seq Scan on compress_hyper_14_817_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_115_chunk r_84 + -> Seq Scan on compress_hyper_14_818_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_116_chunk r_85 + -> Seq Scan on compress_hyper_14_819_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_117_chunk r_86 + -> Seq Scan on compress_hyper_14_820_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_118_chunk r_87 + -> Seq Scan on compress_hyper_14_821_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_119_chunk r_88 + -> Seq Scan on compress_hyper_14_822_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_120_chunk r_89 + -> Seq Scan on compress_hyper_14_823_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_121_chunk r_90 + -> Seq Scan on compress_hyper_14_824_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_122_chunk r_91 + -> Seq Scan on compress_hyper_14_825_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_123_chunk r_92 + -> Seq Scan on compress_hyper_14_826_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_124_chunk r_93 + -> Seq Scan 
on compress_hyper_14_827_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_125_chunk r_94 + -> Seq Scan on compress_hyper_14_828_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_126_chunk r_95 + -> Seq Scan on compress_hyper_14_829_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_127_chunk r_96 + -> Seq Scan on compress_hyper_14_830_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_128_chunk r_97 + -> Seq Scan on compress_hyper_14_831_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_129_chunk r_98 + -> Seq Scan on compress_hyper_14_832_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_130_chunk r_99 + -> Seq Scan on compress_hyper_14_833_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_131_chunk r_100 + -> Seq Scan on compress_hyper_14_834_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_132_chunk r_101 + -> Seq Scan on compress_hyper_14_835_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_133_chunk r_102 + -> Seq Scan on compress_hyper_14_836_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_134_chunk r_103 + -> Seq Scan on compress_hyper_14_837_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_135_chunk r_104 + -> Seq Scan on compress_hyper_14_838_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_136_chunk r_105 + -> Seq Scan on compress_hyper_14_839_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_137_chunk r_106 + -> Seq Scan on compress_hyper_14_840_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_138_chunk r_107 + -> Seq Scan on compress_hyper_14_841_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_139_chunk r_108 + -> Seq Scan on compress_hyper_14_842_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_140_chunk r_109 + -> Seq Scan on compress_hyper_14_843_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_141_chunk r_110 + -> Seq Scan on compress_hyper_14_844_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_142_chunk r_111 + -> Seq Scan on compress_hyper_14_845_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_143_chunk r_112 + -> Seq Scan on compress_hyper_14_846_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_144_chunk r_113 + -> Seq Scan on compress_hyper_14_847_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_145_chunk r_114 + -> Seq Scan on compress_hyper_14_848_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_146_chunk r_115 + -> Seq Scan on compress_hyper_14_849_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_147_chunk r_116 + -> Seq Scan on compress_hyper_14_850_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_148_chunk r_117 + -> Seq Scan on compress_hyper_14_851_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_149_chunk r_118 + -> Seq Scan on compress_hyper_14_852_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_150_chunk r_119 + -> Seq Scan on compress_hyper_14_853_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_151_chunk r_120 + -> Seq Scan on compress_hyper_14_854_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_152_chunk r_121 + -> Seq Scan on compress_hyper_14_855_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_153_chunk r_122 + -> Seq Scan on compress_hyper_14_856_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_154_chunk r_123 + -> Seq Scan on compress_hyper_14_857_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_155_chunk r_124 + -> Seq Scan on compress_hyper_14_858_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_156_chunk r_125 + -> Seq Scan on compress_hyper_14_859_chunk + -> Custom Scan (DecompressChunk) on 
_hyper_13_157_chunk r_126 + -> Seq Scan on compress_hyper_14_860_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_158_chunk r_127 + -> Seq Scan on compress_hyper_14_861_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_159_chunk r_128 + -> Seq Scan on compress_hyper_14_862_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_160_chunk r_129 + -> Seq Scan on compress_hyper_14_863_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_161_chunk r_130 + -> Seq Scan on compress_hyper_14_864_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_162_chunk r_131 + -> Seq Scan on compress_hyper_14_865_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_163_chunk r_132 + -> Seq Scan on compress_hyper_14_866_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_164_chunk r_133 + -> Seq Scan on compress_hyper_14_867_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_165_chunk r_134 + -> Seq Scan on compress_hyper_14_868_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_166_chunk r_135 + -> Seq Scan on compress_hyper_14_869_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_167_chunk r_136 + -> Seq Scan on compress_hyper_14_870_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_168_chunk r_137 + -> Seq Scan on compress_hyper_14_871_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_169_chunk r_138 + -> Seq Scan on compress_hyper_14_872_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_170_chunk r_139 + -> Seq Scan on compress_hyper_14_873_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_171_chunk r_140 + -> Seq Scan on compress_hyper_14_874_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_172_chunk r_141 + -> Seq Scan on compress_hyper_14_875_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_173_chunk r_142 + -> Seq Scan on compress_hyper_14_876_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_174_chunk r_143 + -> Seq Scan on compress_hyper_14_877_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_175_chunk r_144 + -> Seq Scan on compress_hyper_14_878_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_176_chunk r_145 + -> Seq Scan on compress_hyper_14_879_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_177_chunk r_146 + -> Seq Scan on compress_hyper_14_880_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_178_chunk r_147 + -> Seq Scan on compress_hyper_14_881_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_179_chunk r_148 + -> Seq Scan on compress_hyper_14_882_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_180_chunk r_149 + -> Seq Scan on compress_hyper_14_883_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_181_chunk r_150 + -> Seq Scan on compress_hyper_14_884_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_182_chunk r_151 + -> Seq Scan on compress_hyper_14_885_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_183_chunk r_152 + -> Seq Scan on compress_hyper_14_886_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_184_chunk r_153 + -> Seq Scan on compress_hyper_14_887_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_185_chunk r_154 + -> Seq Scan on compress_hyper_14_888_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_186_chunk r_155 + -> Seq Scan on compress_hyper_14_889_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_187_chunk r_156 + -> Seq Scan on compress_hyper_14_890_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_188_chunk r_157 + -> Seq Scan on compress_hyper_14_891_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_189_chunk r_158 + -> Seq Scan on 
compress_hyper_14_892_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_190_chunk r_159 + -> Seq Scan on compress_hyper_14_893_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_191_chunk r_160 + -> Seq Scan on compress_hyper_14_894_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_192_chunk r_161 + -> Seq Scan on compress_hyper_14_895_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_193_chunk r_162 + -> Seq Scan on compress_hyper_14_896_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_194_chunk r_163 + -> Seq Scan on compress_hyper_14_897_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_195_chunk r_164 + -> Seq Scan on compress_hyper_14_898_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_196_chunk r_165 + -> Seq Scan on compress_hyper_14_899_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_197_chunk r_166 + -> Seq Scan on compress_hyper_14_900_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_198_chunk r_167 + -> Seq Scan on compress_hyper_14_901_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_199_chunk r_168 + -> Seq Scan on compress_hyper_14_902_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_200_chunk r_169 + -> Seq Scan on compress_hyper_14_903_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_201_chunk r_170 + -> Seq Scan on compress_hyper_14_904_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_202_chunk r_171 + -> Seq Scan on compress_hyper_14_905_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_203_chunk r_172 + -> Seq Scan on compress_hyper_14_906_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_204_chunk r_173 + -> Seq Scan on compress_hyper_14_907_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_205_chunk r_174 + -> Seq Scan on compress_hyper_14_908_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_206_chunk r_175 + -> Seq Scan on compress_hyper_14_909_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_207_chunk r_176 + -> Seq Scan on compress_hyper_14_910_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_208_chunk r_177 + -> Seq Scan on compress_hyper_14_911_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_209_chunk r_178 + -> Seq Scan on compress_hyper_14_912_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_210_chunk r_179 + -> Seq Scan on compress_hyper_14_913_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_211_chunk r_180 + -> Seq Scan on compress_hyper_14_914_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_212_chunk r_181 + -> Seq Scan on compress_hyper_14_915_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_213_chunk r_182 + -> Seq Scan on compress_hyper_14_916_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_214_chunk r_183 + -> Seq Scan on compress_hyper_14_917_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_215_chunk r_184 + -> Seq Scan on compress_hyper_14_918_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_216_chunk r_185 + -> Seq Scan on compress_hyper_14_919_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_217_chunk r_186 + -> Seq Scan on compress_hyper_14_920_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_218_chunk r_187 + -> Seq Scan on compress_hyper_14_921_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_219_chunk r_188 + -> Seq Scan on compress_hyper_14_922_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_220_chunk r_189 + -> Seq Scan on compress_hyper_14_923_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_221_chunk r_190 + -> Seq Scan on compress_hyper_14_924_chunk + -> Custom Scan (DecompressChunk) on 
_hyper_13_222_chunk r_191 + -> Seq Scan on compress_hyper_14_925_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_223_chunk r_192 + -> Seq Scan on compress_hyper_14_926_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_224_chunk r_193 + -> Seq Scan on compress_hyper_14_927_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_225_chunk r_194 + -> Seq Scan on compress_hyper_14_928_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_226_chunk r_195 + -> Seq Scan on compress_hyper_14_929_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_227_chunk r_196 + -> Seq Scan on compress_hyper_14_930_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_228_chunk r_197 + -> Seq Scan on compress_hyper_14_931_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_229_chunk r_198 + -> Seq Scan on compress_hyper_14_932_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_230_chunk r_199 + -> Seq Scan on compress_hyper_14_933_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_231_chunk r_200 + -> Seq Scan on compress_hyper_14_934_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_232_chunk r_201 + -> Seq Scan on compress_hyper_14_935_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_233_chunk r_202 + -> Seq Scan on compress_hyper_14_936_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_234_chunk r_203 + -> Seq Scan on compress_hyper_14_937_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_235_chunk r_204 + -> Seq Scan on compress_hyper_14_938_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_236_chunk r_205 + -> Seq Scan on compress_hyper_14_939_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_237_chunk r_206 + -> Seq Scan on compress_hyper_14_940_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_238_chunk r_207 + -> Seq Scan on compress_hyper_14_941_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_239_chunk r_208 + -> Seq Scan on compress_hyper_14_942_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_240_chunk r_209 + -> Seq Scan on compress_hyper_14_943_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_241_chunk r_210 + -> Seq Scan on compress_hyper_14_944_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_242_chunk r_211 + -> Seq Scan on compress_hyper_14_945_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_243_chunk r_212 + -> Seq Scan on compress_hyper_14_946_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_244_chunk r_213 + -> Seq Scan on compress_hyper_14_947_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_245_chunk r_214 + -> Seq Scan on compress_hyper_14_948_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_246_chunk r_215 + -> Seq Scan on compress_hyper_14_949_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_247_chunk r_216 + -> Seq Scan on compress_hyper_14_950_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_248_chunk r_217 + -> Seq Scan on compress_hyper_14_951_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_249_chunk r_218 + -> Seq Scan on compress_hyper_14_952_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_250_chunk r_219 + -> Seq Scan on compress_hyper_14_953_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_251_chunk r_220 + -> Seq Scan on compress_hyper_14_954_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_252_chunk r_221 + -> Seq Scan on compress_hyper_14_955_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_253_chunk r_222 + -> Seq Scan on compress_hyper_14_956_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_254_chunk r_223 + -> Seq Scan on 
compress_hyper_14_957_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_255_chunk r_224 + -> Seq Scan on compress_hyper_14_958_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_256_chunk r_225 + -> Seq Scan on compress_hyper_14_959_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_257_chunk r_226 + -> Seq Scan on compress_hyper_14_960_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_258_chunk r_227 + -> Seq Scan on compress_hyper_14_961_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_259_chunk r_228 + -> Seq Scan on compress_hyper_14_962_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_260_chunk r_229 + -> Seq Scan on compress_hyper_14_963_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_261_chunk r_230 + -> Seq Scan on compress_hyper_14_964_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_262_chunk r_231 + -> Seq Scan on compress_hyper_14_965_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_263_chunk r_232 + -> Seq Scan on compress_hyper_14_966_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_264_chunk r_233 + -> Seq Scan on compress_hyper_14_967_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_265_chunk r_234 + -> Seq Scan on compress_hyper_14_968_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_266_chunk r_235 + -> Seq Scan on compress_hyper_14_969_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_267_chunk r_236 + -> Seq Scan on compress_hyper_14_970_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_268_chunk r_237 + -> Seq Scan on compress_hyper_14_971_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_269_chunk r_238 + -> Seq Scan on compress_hyper_14_972_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_270_chunk r_239 + -> Seq Scan on compress_hyper_14_973_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_271_chunk r_240 + -> Seq Scan on compress_hyper_14_974_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_272_chunk r_241 + -> Seq Scan on compress_hyper_14_975_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_273_chunk r_242 + -> Seq Scan on compress_hyper_14_976_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_274_chunk r_243 + -> Seq Scan on compress_hyper_14_977_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_275_chunk r_244 + -> Seq Scan on compress_hyper_14_978_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_276_chunk r_245 + -> Seq Scan on compress_hyper_14_979_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_277_chunk r_246 + -> Seq Scan on compress_hyper_14_980_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_278_chunk r_247 + -> Seq Scan on compress_hyper_14_981_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_279_chunk r_248 + -> Seq Scan on compress_hyper_14_982_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_280_chunk r_249 + -> Seq Scan on compress_hyper_14_983_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_281_chunk r_250 + -> Seq Scan on compress_hyper_14_984_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_282_chunk r_251 + -> Seq Scan on compress_hyper_14_985_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_283_chunk r_252 + -> Seq Scan on compress_hyper_14_986_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_284_chunk r_253 + -> Seq Scan on compress_hyper_14_987_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_285_chunk r_254 + -> Seq Scan on compress_hyper_14_988_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_286_chunk r_255 + -> Seq Scan on compress_hyper_14_989_chunk + -> Custom Scan (DecompressChunk) on 
_hyper_13_287_chunk r_256 + -> Seq Scan on compress_hyper_14_990_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_288_chunk r_257 + -> Seq Scan on compress_hyper_14_991_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_289_chunk r_258 + -> Seq Scan on compress_hyper_14_992_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_290_chunk r_259 + -> Seq Scan on compress_hyper_14_993_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_291_chunk r_260 + -> Seq Scan on compress_hyper_14_994_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_292_chunk r_261 + -> Seq Scan on compress_hyper_14_995_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_293_chunk r_262 + -> Seq Scan on compress_hyper_14_996_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_294_chunk r_263 + -> Seq Scan on compress_hyper_14_997_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_295_chunk r_264 + -> Seq Scan on compress_hyper_14_998_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_296_chunk r_265 + -> Seq Scan on compress_hyper_14_999_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_297_chunk r_266 + -> Seq Scan on compress_hyper_14_1000_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_298_chunk r_267 + -> Seq Scan on compress_hyper_14_1001_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_299_chunk r_268 + -> Seq Scan on compress_hyper_14_1002_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_300_chunk r_269 + -> Seq Scan on compress_hyper_14_1003_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_301_chunk r_270 + -> Seq Scan on compress_hyper_14_1004_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_302_chunk r_271 + -> Seq Scan on compress_hyper_14_1005_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_303_chunk r_272 + -> Seq Scan on compress_hyper_14_1006_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_304_chunk r_273 + -> Seq Scan on compress_hyper_14_1007_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_305_chunk r_274 + -> Seq Scan on compress_hyper_14_1008_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_306_chunk r_275 + -> Seq Scan on compress_hyper_14_1009_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_307_chunk r_276 + -> Seq Scan on compress_hyper_14_1010_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_308_chunk r_277 + -> Seq Scan on compress_hyper_14_1011_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_309_chunk r_278 + -> Seq Scan on compress_hyper_14_1012_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_310_chunk r_279 + -> Seq Scan on compress_hyper_14_1013_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_311_chunk r_280 + -> Seq Scan on compress_hyper_14_1014_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_312_chunk r_281 + -> Seq Scan on compress_hyper_14_1015_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_313_chunk r_282 + -> Seq Scan on compress_hyper_14_1016_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_314_chunk r_283 + -> Seq Scan on compress_hyper_14_1017_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_315_chunk r_284 + -> Seq Scan on compress_hyper_14_1018_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_316_chunk r_285 + -> Seq Scan on compress_hyper_14_1019_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_317_chunk r_286 + -> Seq Scan on compress_hyper_14_1020_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_318_chunk r_287 + -> Seq Scan on compress_hyper_14_1021_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_319_chunk r_288 + -> Seq Scan on 
compress_hyper_14_1022_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_320_chunk r_289 + -> Seq Scan on compress_hyper_14_1023_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_321_chunk r_290 + -> Seq Scan on compress_hyper_14_1024_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_322_chunk r_291 + -> Seq Scan on compress_hyper_14_1025_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_323_chunk r_292 + -> Seq Scan on compress_hyper_14_1026_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_324_chunk r_293 + -> Seq Scan on compress_hyper_14_1027_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_325_chunk r_294 + -> Seq Scan on compress_hyper_14_1028_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_326_chunk r_295 + -> Seq Scan on compress_hyper_14_1029_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_327_chunk r_296 + -> Seq Scan on compress_hyper_14_1030_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_328_chunk r_297 + -> Seq Scan on compress_hyper_14_1031_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_329_chunk r_298 + -> Seq Scan on compress_hyper_14_1032_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_330_chunk r_299 + -> Seq Scan on compress_hyper_14_1033_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_331_chunk r_300 + -> Seq Scan on compress_hyper_14_1034_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_332_chunk r_301 + -> Seq Scan on compress_hyper_14_1035_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_333_chunk r_302 + -> Seq Scan on compress_hyper_14_1036_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_334_chunk r_303 + -> Seq Scan on compress_hyper_14_1037_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_335_chunk r_304 + -> Seq Scan on compress_hyper_14_1038_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_336_chunk r_305 + -> Seq Scan on compress_hyper_14_1039_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_337_chunk r_306 + -> Seq Scan on compress_hyper_14_1040_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_338_chunk r_307 + -> Seq Scan on compress_hyper_14_1041_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_339_chunk r_308 + -> Seq Scan on compress_hyper_14_1042_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_340_chunk r_309 + -> Seq Scan on compress_hyper_14_1043_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_341_chunk r_310 + -> Seq Scan on compress_hyper_14_1044_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_342_chunk r_311 + -> Seq Scan on compress_hyper_14_1045_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_343_chunk r_312 + -> Seq Scan on compress_hyper_14_1046_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_344_chunk r_313 + -> Seq Scan on compress_hyper_14_1047_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_345_chunk r_314 + -> Seq Scan on compress_hyper_14_1048_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_346_chunk r_315 + -> Seq Scan on compress_hyper_14_1049_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_347_chunk r_316 + -> Seq Scan on compress_hyper_14_1050_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_348_chunk r_317 + -> Seq Scan on compress_hyper_14_1051_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_349_chunk r_318 + -> Seq Scan on compress_hyper_14_1052_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_350_chunk r_319 + -> Seq Scan on compress_hyper_14_1053_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_351_chunk r_320 + -> Seq Scan on compress_hyper_14_1054_chunk + -> 
Custom Scan (DecompressChunk) on _hyper_13_352_chunk r_321 + -> Seq Scan on compress_hyper_14_1055_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_353_chunk r_322 + -> Seq Scan on compress_hyper_14_1056_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_354_chunk r_323 + -> Seq Scan on compress_hyper_14_1057_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_355_chunk r_324 + -> Seq Scan on compress_hyper_14_1058_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_356_chunk r_325 + -> Seq Scan on compress_hyper_14_1059_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_357_chunk r_326 + -> Seq Scan on compress_hyper_14_1060_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_358_chunk r_327 + -> Seq Scan on compress_hyper_14_1061_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_359_chunk r_328 + -> Seq Scan on compress_hyper_14_1062_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_360_chunk r_329 + -> Seq Scan on compress_hyper_14_1063_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_361_chunk r_330 + -> Seq Scan on compress_hyper_14_1064_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_362_chunk r_331 + -> Seq Scan on compress_hyper_14_1065_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_363_chunk r_332 + -> Seq Scan on compress_hyper_14_1066_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_364_chunk r_333 + -> Seq Scan on compress_hyper_14_1067_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_365_chunk r_334 + -> Seq Scan on compress_hyper_14_1068_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_366_chunk r_335 + -> Seq Scan on compress_hyper_14_1069_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_367_chunk r_336 + -> Seq Scan on compress_hyper_14_1070_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_368_chunk r_337 + -> Seq Scan on compress_hyper_14_1071_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_369_chunk r_338 + -> Seq Scan on compress_hyper_14_1072_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_370_chunk r_339 + -> Seq Scan on compress_hyper_14_1073_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_371_chunk r_340 + -> Seq Scan on compress_hyper_14_1074_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_372_chunk r_341 + -> Seq Scan on compress_hyper_14_1075_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_373_chunk r_342 + -> Seq Scan on compress_hyper_14_1076_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_374_chunk r_343 + -> Seq Scan on compress_hyper_14_1077_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_375_chunk r_344 + -> Seq Scan on compress_hyper_14_1078_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_376_chunk r_345 + -> Seq Scan on compress_hyper_14_1079_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_377_chunk r_346 + -> Seq Scan on compress_hyper_14_1080_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_378_chunk r_347 + -> Seq Scan on compress_hyper_14_1081_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_379_chunk r_348 + -> Seq Scan on compress_hyper_14_1082_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_380_chunk r_349 + -> Seq Scan on compress_hyper_14_1083_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_381_chunk r_350 + -> Seq Scan on compress_hyper_14_1084_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_382_chunk r_351 + -> Seq Scan on compress_hyper_14_1085_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_383_chunk r_352 + -> Seq Scan on compress_hyper_14_1086_chunk + -> Custom Scan (DecompressChunk) on 
_hyper_13_384_chunk r_353 + -> Seq Scan on compress_hyper_14_1087_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_385_chunk r_354 + -> Seq Scan on compress_hyper_14_1088_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_386_chunk r_355 + -> Seq Scan on compress_hyper_14_1089_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_387_chunk r_356 + -> Seq Scan on compress_hyper_14_1090_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_388_chunk r_357 + -> Seq Scan on compress_hyper_14_1091_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_389_chunk r_358 + -> Seq Scan on compress_hyper_14_1092_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_390_chunk r_359 + -> Seq Scan on compress_hyper_14_1093_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_391_chunk r_360 + -> Seq Scan on compress_hyper_14_1094_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_392_chunk r_361 + -> Seq Scan on compress_hyper_14_1095_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_393_chunk r_362 + -> Seq Scan on compress_hyper_14_1096_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_394_chunk r_363 + -> Seq Scan on compress_hyper_14_1097_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_395_chunk r_364 + -> Seq Scan on compress_hyper_14_1098_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_396_chunk r_365 + -> Seq Scan on compress_hyper_14_1099_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_397_chunk r_366 + -> Seq Scan on compress_hyper_14_1100_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_398_chunk r_367 + -> Seq Scan on compress_hyper_14_1101_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_399_chunk r_368 + -> Seq Scan on compress_hyper_14_1102_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_400_chunk r_369 + -> Seq Scan on compress_hyper_14_1103_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_401_chunk r_370 + -> Seq Scan on compress_hyper_14_1104_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_402_chunk r_371 + -> Seq Scan on compress_hyper_14_1105_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_403_chunk r_372 + -> Seq Scan on compress_hyper_14_1106_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_404_chunk r_373 + -> Seq Scan on compress_hyper_14_1107_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_405_chunk r_374 + -> Seq Scan on compress_hyper_14_1108_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_406_chunk r_375 + -> Seq Scan on compress_hyper_14_1109_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_407_chunk r_376 + -> Seq Scan on compress_hyper_14_1110_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_408_chunk r_377 + -> Seq Scan on compress_hyper_14_1111_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_409_chunk r_378 + -> Seq Scan on compress_hyper_14_1112_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_410_chunk r_379 + -> Seq Scan on compress_hyper_14_1113_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_411_chunk r_380 + -> Seq Scan on compress_hyper_14_1114_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_412_chunk r_381 + -> Seq Scan on compress_hyper_14_1115_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_413_chunk r_382 + -> Seq Scan on compress_hyper_14_1116_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_414_chunk r_383 + -> Seq Scan on compress_hyper_14_1117_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_415_chunk r_384 + -> Seq Scan on compress_hyper_14_1118_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_416_chunk r_385 + -> Seq 
Scan on compress_hyper_14_1119_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_417_chunk r_386 + -> Seq Scan on compress_hyper_14_1120_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_418_chunk r_387 + -> Seq Scan on compress_hyper_14_1121_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_419_chunk r_388 + -> Seq Scan on compress_hyper_14_1122_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_420_chunk r_389 + -> Seq Scan on compress_hyper_14_1123_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_421_chunk r_390 + -> Seq Scan on compress_hyper_14_1124_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_422_chunk r_391 + -> Seq Scan on compress_hyper_14_1125_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_423_chunk r_392 + -> Seq Scan on compress_hyper_14_1126_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_424_chunk r_393 + -> Seq Scan on compress_hyper_14_1127_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_425_chunk r_394 + -> Seq Scan on compress_hyper_14_1128_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_426_chunk r_395 + -> Seq Scan on compress_hyper_14_1129_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_427_chunk r_396 + -> Seq Scan on compress_hyper_14_1130_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_428_chunk r_397 + -> Seq Scan on compress_hyper_14_1131_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_429_chunk r_398 + -> Seq Scan on compress_hyper_14_1132_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_430_chunk r_399 + -> Seq Scan on compress_hyper_14_1133_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_431_chunk r_400 + -> Seq Scan on compress_hyper_14_1134_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_432_chunk r_401 + -> Seq Scan on compress_hyper_14_1135_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_433_chunk r_402 + -> Seq Scan on compress_hyper_14_1136_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_434_chunk r_403 + -> Seq Scan on compress_hyper_14_1137_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_435_chunk r_404 + -> Seq Scan on compress_hyper_14_1138_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_436_chunk r_405 + -> Seq Scan on compress_hyper_14_1139_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_437_chunk r_406 + -> Seq Scan on compress_hyper_14_1140_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_438_chunk r_407 + -> Seq Scan on compress_hyper_14_1141_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_439_chunk r_408 + -> Seq Scan on compress_hyper_14_1142_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_440_chunk r_409 + -> Seq Scan on compress_hyper_14_1143_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_441_chunk r_410 + -> Seq Scan on compress_hyper_14_1144_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_442_chunk r_411 + -> Seq Scan on compress_hyper_14_1145_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_443_chunk r_412 + -> Seq Scan on compress_hyper_14_1146_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_444_chunk r_413 + -> Seq Scan on compress_hyper_14_1147_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_445_chunk r_414 + -> Seq Scan on compress_hyper_14_1148_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_446_chunk r_415 + -> Seq Scan on compress_hyper_14_1149_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_447_chunk r_416 + -> Seq Scan on compress_hyper_14_1150_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_448_chunk r_417 + -> Seq Scan on 
compress_hyper_14_1151_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_449_chunk r_418 + -> Seq Scan on compress_hyper_14_1152_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_450_chunk r_419 + -> Seq Scan on compress_hyper_14_1153_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_451_chunk r_420 + -> Seq Scan on compress_hyper_14_1154_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_452_chunk r_421 + -> Seq Scan on compress_hyper_14_1155_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_453_chunk r_422 + -> Seq Scan on compress_hyper_14_1156_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_454_chunk r_423 + -> Seq Scan on compress_hyper_14_1157_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_455_chunk r_424 + -> Seq Scan on compress_hyper_14_1158_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_456_chunk r_425 + -> Seq Scan on compress_hyper_14_1159_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_457_chunk r_426 + -> Seq Scan on compress_hyper_14_1160_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_458_chunk r_427 + -> Seq Scan on compress_hyper_14_1161_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_459_chunk r_428 + -> Seq Scan on compress_hyper_14_1162_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_460_chunk r_429 + -> Seq Scan on compress_hyper_14_1163_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_461_chunk r_430 + -> Seq Scan on compress_hyper_14_1164_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_462_chunk r_431 + -> Seq Scan on compress_hyper_14_1165_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_463_chunk r_432 + -> Seq Scan on compress_hyper_14_1166_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_464_chunk r_433 + -> Seq Scan on compress_hyper_14_1167_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_465_chunk r_434 + -> Seq Scan on compress_hyper_14_1168_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_466_chunk r_435 + -> Seq Scan on compress_hyper_14_1169_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_467_chunk r_436 + -> Seq Scan on compress_hyper_14_1170_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_468_chunk r_437 + -> Seq Scan on compress_hyper_14_1171_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_469_chunk r_438 + -> Seq Scan on compress_hyper_14_1172_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_470_chunk r_439 + -> Seq Scan on compress_hyper_14_1173_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_471_chunk r_440 + -> Seq Scan on compress_hyper_14_1174_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_472_chunk r_441 + -> Seq Scan on compress_hyper_14_1175_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_473_chunk r_442 + -> Seq Scan on compress_hyper_14_1176_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_474_chunk r_443 + -> Seq Scan on compress_hyper_14_1177_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_475_chunk r_444 + -> Seq Scan on compress_hyper_14_1178_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_476_chunk r_445 + -> Seq Scan on compress_hyper_14_1179_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_477_chunk r_446 + -> Seq Scan on compress_hyper_14_1180_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_478_chunk r_447 + -> Seq Scan on compress_hyper_14_1181_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_479_chunk r_448 + -> Seq Scan on compress_hyper_14_1182_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_480_chunk r_449 + -> Seq Scan on compress_hyper_14_1183_chunk + -> 
Custom Scan (DecompressChunk) on _hyper_13_481_chunk r_450 + -> Seq Scan on compress_hyper_14_1184_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_482_chunk r_451 + -> Seq Scan on compress_hyper_14_1185_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_483_chunk r_452 + -> Seq Scan on compress_hyper_14_1186_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_484_chunk r_453 + -> Seq Scan on compress_hyper_14_1187_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_485_chunk r_454 + -> Seq Scan on compress_hyper_14_1188_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_486_chunk r_455 + -> Seq Scan on compress_hyper_14_1189_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_487_chunk r_456 + -> Seq Scan on compress_hyper_14_1190_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_488_chunk r_457 + -> Seq Scan on compress_hyper_14_1191_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_489_chunk r_458 + -> Seq Scan on compress_hyper_14_1192_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_490_chunk r_459 + -> Seq Scan on compress_hyper_14_1193_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_491_chunk r_460 + -> Seq Scan on compress_hyper_14_1194_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_492_chunk r_461 + -> Seq Scan on compress_hyper_14_1195_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_493_chunk r_462 + -> Seq Scan on compress_hyper_14_1196_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_494_chunk r_463 + -> Seq Scan on compress_hyper_14_1197_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_495_chunk r_464 + -> Seq Scan on compress_hyper_14_1198_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_496_chunk r_465 + -> Seq Scan on compress_hyper_14_1199_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_497_chunk r_466 + -> Seq Scan on compress_hyper_14_1200_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_498_chunk r_467 + -> Seq Scan on compress_hyper_14_1201_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_499_chunk r_468 + -> Seq Scan on compress_hyper_14_1202_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_500_chunk r_469 + -> Seq Scan on compress_hyper_14_1203_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_501_chunk r_470 + -> Seq Scan on compress_hyper_14_1204_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_502_chunk r_471 + -> Seq Scan on compress_hyper_14_1205_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_503_chunk r_472 + -> Seq Scan on compress_hyper_14_1206_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_504_chunk r_473 + -> Seq Scan on compress_hyper_14_1207_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_505_chunk r_474 + -> Seq Scan on compress_hyper_14_1208_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_506_chunk r_475 + -> Seq Scan on compress_hyper_14_1209_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_507_chunk r_476 + -> Seq Scan on compress_hyper_14_1210_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_508_chunk r_477 + -> Seq Scan on compress_hyper_14_1211_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_509_chunk r_478 + -> Seq Scan on compress_hyper_14_1212_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_510_chunk r_479 + -> Seq Scan on compress_hyper_14_1213_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_511_chunk r_480 + -> Seq Scan on compress_hyper_14_1214_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_512_chunk r_481 + -> Seq Scan on compress_hyper_14_1215_chunk + -> Custom Scan (DecompressChunk) on 
_hyper_13_513_chunk r_482 + -> Seq Scan on compress_hyper_14_1216_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_514_chunk r_483 + -> Seq Scan on compress_hyper_14_1217_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_515_chunk r_484 + -> Seq Scan on compress_hyper_14_1218_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_516_chunk r_485 + -> Seq Scan on compress_hyper_14_1219_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_517_chunk r_486 + -> Seq Scan on compress_hyper_14_1220_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_518_chunk r_487 + -> Seq Scan on compress_hyper_14_1221_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_519_chunk r_488 + -> Seq Scan on compress_hyper_14_1222_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_520_chunk r_489 + -> Seq Scan on compress_hyper_14_1223_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_521_chunk r_490 + -> Seq Scan on compress_hyper_14_1224_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_522_chunk r_491 + -> Seq Scan on compress_hyper_14_1225_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_523_chunk r_492 + -> Seq Scan on compress_hyper_14_1226_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_524_chunk r_493 + -> Seq Scan on compress_hyper_14_1227_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_525_chunk r_494 + -> Seq Scan on compress_hyper_14_1228_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_526_chunk r_495 + -> Seq Scan on compress_hyper_14_1229_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_527_chunk r_496 + -> Seq Scan on compress_hyper_14_1230_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_528_chunk r_497 + -> Seq Scan on compress_hyper_14_1231_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_529_chunk r_498 + -> Seq Scan on compress_hyper_14_1232_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_530_chunk r_499 + -> Seq Scan on compress_hyper_14_1233_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_531_chunk r_500 + -> Seq Scan on compress_hyper_14_1234_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_532_chunk r_501 + -> Seq Scan on compress_hyper_14_1235_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_533_chunk r_502 + -> Seq Scan on compress_hyper_14_1236_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_534_chunk r_503 + -> Seq Scan on compress_hyper_14_1237_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_535_chunk r_504 + -> Seq Scan on compress_hyper_14_1238_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_536_chunk r_505 + -> Seq Scan on compress_hyper_14_1239_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_537_chunk r_506 + -> Seq Scan on compress_hyper_14_1240_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_538_chunk r_507 + -> Seq Scan on compress_hyper_14_1241_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_539_chunk r_508 + -> Seq Scan on compress_hyper_14_1242_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_540_chunk r_509 + -> Seq Scan on compress_hyper_14_1243_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_541_chunk r_510 + -> Seq Scan on compress_hyper_14_1244_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_542_chunk r_511 + -> Seq Scan on compress_hyper_14_1245_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_543_chunk r_512 + -> Seq Scan on compress_hyper_14_1246_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_544_chunk r_513 + -> Seq Scan on compress_hyper_14_1247_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_545_chunk r_514 + -> Seq 
Scan on compress_hyper_14_1248_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_546_chunk r_515 + -> Seq Scan on compress_hyper_14_1249_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_547_chunk r_516 + -> Seq Scan on compress_hyper_14_1250_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_548_chunk r_517 + -> Seq Scan on compress_hyper_14_1251_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_549_chunk r_518 + -> Seq Scan on compress_hyper_14_1252_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_550_chunk r_519 + -> Seq Scan on compress_hyper_14_1253_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_551_chunk r_520 + -> Seq Scan on compress_hyper_14_1254_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_552_chunk r_521 + -> Seq Scan on compress_hyper_14_1255_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_553_chunk r_522 + -> Seq Scan on compress_hyper_14_1256_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_554_chunk r_523 + -> Seq Scan on compress_hyper_14_1257_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_555_chunk r_524 + -> Seq Scan on compress_hyper_14_1258_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_556_chunk r_525 + -> Seq Scan on compress_hyper_14_1259_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_557_chunk r_526 + -> Seq Scan on compress_hyper_14_1260_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_558_chunk r_527 + -> Seq Scan on compress_hyper_14_1261_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_559_chunk r_528 + -> Seq Scan on compress_hyper_14_1262_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_560_chunk r_529 + -> Seq Scan on compress_hyper_14_1263_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_561_chunk r_530 + -> Seq Scan on compress_hyper_14_1264_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_562_chunk r_531 + -> Seq Scan on compress_hyper_14_1265_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_563_chunk r_532 + -> Seq Scan on compress_hyper_14_1266_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_564_chunk r_533 + -> Seq Scan on compress_hyper_14_1267_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_565_chunk r_534 + -> Seq Scan on compress_hyper_14_1268_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_566_chunk r_535 + -> Seq Scan on compress_hyper_14_1269_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_567_chunk r_536 + -> Seq Scan on compress_hyper_14_1270_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_568_chunk r_537 + -> Seq Scan on compress_hyper_14_1271_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_569_chunk r_538 + -> Seq Scan on compress_hyper_14_1272_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_570_chunk r_539 + -> Seq Scan on compress_hyper_14_1273_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_571_chunk r_540 + -> Seq Scan on compress_hyper_14_1274_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_572_chunk r_541 + -> Seq Scan on compress_hyper_14_1275_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_573_chunk r_542 + -> Seq Scan on compress_hyper_14_1276_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_574_chunk r_543 + -> Seq Scan on compress_hyper_14_1277_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_575_chunk r_544 + -> Seq Scan on compress_hyper_14_1278_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_576_chunk r_545 + -> Seq Scan on compress_hyper_14_1279_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_577_chunk r_546 + -> Seq Scan on 
compress_hyper_14_1280_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_578_chunk r_547 + -> Seq Scan on compress_hyper_14_1281_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_579_chunk r_548 + -> Seq Scan on compress_hyper_14_1282_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_580_chunk r_549 + -> Seq Scan on compress_hyper_14_1283_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_581_chunk r_550 + -> Seq Scan on compress_hyper_14_1284_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_582_chunk r_551 + -> Seq Scan on compress_hyper_14_1285_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_583_chunk r_552 + -> Seq Scan on compress_hyper_14_1286_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_584_chunk r_553 + -> Seq Scan on compress_hyper_14_1287_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_585_chunk r_554 + -> Seq Scan on compress_hyper_14_1288_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_586_chunk r_555 + -> Seq Scan on compress_hyper_14_1289_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_587_chunk r_556 + -> Seq Scan on compress_hyper_14_1290_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_588_chunk r_557 + -> Seq Scan on compress_hyper_14_1291_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_589_chunk r_558 + -> Seq Scan on compress_hyper_14_1292_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_590_chunk r_559 + -> Seq Scan on compress_hyper_14_1293_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_591_chunk r_560 + -> Seq Scan on compress_hyper_14_1294_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_592_chunk r_561 + -> Seq Scan on compress_hyper_14_1295_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_593_chunk r_562 + -> Seq Scan on compress_hyper_14_1296_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_594_chunk r_563 + -> Seq Scan on compress_hyper_14_1297_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_595_chunk r_564 + -> Seq Scan on compress_hyper_14_1298_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_596_chunk r_565 + -> Seq Scan on compress_hyper_14_1299_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_597_chunk r_566 + -> Seq Scan on compress_hyper_14_1300_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_598_chunk r_567 + -> Seq Scan on compress_hyper_14_1301_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_599_chunk r_568 + -> Seq Scan on compress_hyper_14_1302_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_600_chunk r_569 + -> Seq Scan on compress_hyper_14_1303_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_601_chunk r_570 + -> Seq Scan on compress_hyper_14_1304_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_602_chunk r_571 + -> Seq Scan on compress_hyper_14_1305_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_603_chunk r_572 + -> Seq Scan on compress_hyper_14_1306_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_604_chunk r_573 + -> Seq Scan on compress_hyper_14_1307_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_605_chunk r_574 + -> Seq Scan on compress_hyper_14_1308_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_606_chunk r_575 + -> Seq Scan on compress_hyper_14_1309_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_607_chunk r_576 + -> Seq Scan on compress_hyper_14_1310_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_608_chunk r_577 + -> Seq Scan on compress_hyper_14_1311_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_609_chunk r_578 + -> Seq Scan on compress_hyper_14_1312_chunk + -> 
Custom Scan (DecompressChunk) on _hyper_13_610_chunk r_579 + -> Seq Scan on compress_hyper_14_1313_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_611_chunk r_580 + -> Seq Scan on compress_hyper_14_1314_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_612_chunk r_581 + -> Seq Scan on compress_hyper_14_1315_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_613_chunk r_582 + -> Seq Scan on compress_hyper_14_1316_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_614_chunk r_583 + -> Seq Scan on compress_hyper_14_1317_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_615_chunk r_584 + -> Seq Scan on compress_hyper_14_1318_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_616_chunk r_585 + -> Seq Scan on compress_hyper_14_1319_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_617_chunk r_586 + -> Seq Scan on compress_hyper_14_1320_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_618_chunk r_587 + -> Seq Scan on compress_hyper_14_1321_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_619_chunk r_588 + -> Seq Scan on compress_hyper_14_1322_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_620_chunk r_589 + -> Seq Scan on compress_hyper_14_1323_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_621_chunk r_590 + -> Seq Scan on compress_hyper_14_1324_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_622_chunk r_591 + -> Seq Scan on compress_hyper_14_1325_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_623_chunk r_592 + -> Seq Scan on compress_hyper_14_1326_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_624_chunk r_593 + -> Seq Scan on compress_hyper_14_1327_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_625_chunk r_594 + -> Seq Scan on compress_hyper_14_1328_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_626_chunk r_595 + -> Seq Scan on compress_hyper_14_1329_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_627_chunk r_596 + -> Seq Scan on compress_hyper_14_1330_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_628_chunk r_597 + -> Seq Scan on compress_hyper_14_1331_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_629_chunk r_598 + -> Seq Scan on compress_hyper_14_1332_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_630_chunk r_599 + -> Seq Scan on compress_hyper_14_1333_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_631_chunk r_600 + -> Seq Scan on compress_hyper_14_1334_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_632_chunk r_601 + -> Seq Scan on compress_hyper_14_1335_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_633_chunk r_602 + -> Seq Scan on compress_hyper_14_1336_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_634_chunk r_603 + -> Seq Scan on compress_hyper_14_1337_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_635_chunk r_604 + -> Seq Scan on compress_hyper_14_1338_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_636_chunk r_605 + -> Seq Scan on compress_hyper_14_1339_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_637_chunk r_606 + -> Seq Scan on compress_hyper_14_1340_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_638_chunk r_607 + -> Seq Scan on compress_hyper_14_1341_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_639_chunk r_608 + -> Seq Scan on compress_hyper_14_1342_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_640_chunk r_609 + -> Seq Scan on compress_hyper_14_1343_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_641_chunk r_610 + -> Seq Scan on compress_hyper_14_1344_chunk + -> Custom Scan (DecompressChunk) on 
_hyper_13_642_chunk r_611 + -> Seq Scan on compress_hyper_14_1345_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_643_chunk r_612 + -> Seq Scan on compress_hyper_14_1346_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_644_chunk r_613 + -> Seq Scan on compress_hyper_14_1347_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_645_chunk r_614 + -> Seq Scan on compress_hyper_14_1348_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_646_chunk r_615 + -> Seq Scan on compress_hyper_14_1349_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_647_chunk r_616 + -> Seq Scan on compress_hyper_14_1350_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_648_chunk r_617 + -> Seq Scan on compress_hyper_14_1351_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_649_chunk r_618 + -> Seq Scan on compress_hyper_14_1352_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_650_chunk r_619 + -> Seq Scan on compress_hyper_14_1353_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_651_chunk r_620 + -> Seq Scan on compress_hyper_14_1354_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_652_chunk r_621 + -> Seq Scan on compress_hyper_14_1355_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_653_chunk r_622 + -> Seq Scan on compress_hyper_14_1356_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_654_chunk r_623 + -> Seq Scan on compress_hyper_14_1357_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_655_chunk r_624 + -> Seq Scan on compress_hyper_14_1358_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_656_chunk r_625 + -> Seq Scan on compress_hyper_14_1359_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_657_chunk r_626 + -> Seq Scan on compress_hyper_14_1360_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_658_chunk r_627 + -> Seq Scan on compress_hyper_14_1361_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_659_chunk r_628 + -> Seq Scan on compress_hyper_14_1362_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_660_chunk r_629 + -> Seq Scan on compress_hyper_14_1363_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_661_chunk r_630 + -> Seq Scan on compress_hyper_14_1364_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_662_chunk r_631 + -> Seq Scan on compress_hyper_14_1365_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_663_chunk r_632 + -> Seq Scan on compress_hyper_14_1366_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_664_chunk r_633 + -> Seq Scan on compress_hyper_14_1367_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_665_chunk r_634 + -> Seq Scan on compress_hyper_14_1368_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_666_chunk r_635 + -> Seq Scan on compress_hyper_14_1369_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_667_chunk r_636 + -> Seq Scan on compress_hyper_14_1370_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_668_chunk r_637 + -> Seq Scan on compress_hyper_14_1371_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_669_chunk r_638 + -> Seq Scan on compress_hyper_14_1372_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_670_chunk r_639 + -> Seq Scan on compress_hyper_14_1373_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_671_chunk r_640 + -> Seq Scan on compress_hyper_14_1374_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_672_chunk r_641 + -> Seq Scan on compress_hyper_14_1375_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_673_chunk r_642 + -> Seq Scan on compress_hyper_14_1376_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_674_chunk r_643 + -> Seq 
Scan on compress_hyper_14_1377_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_675_chunk r_644 + -> Seq Scan on compress_hyper_14_1378_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_676_chunk r_645 + -> Seq Scan on compress_hyper_14_1379_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_677_chunk r_646 + -> Seq Scan on compress_hyper_14_1380_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_678_chunk r_647 + -> Seq Scan on compress_hyper_14_1381_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_679_chunk r_648 + -> Seq Scan on compress_hyper_14_1382_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_680_chunk r_649 + -> Seq Scan on compress_hyper_14_1383_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_681_chunk r_650 + -> Seq Scan on compress_hyper_14_1384_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_682_chunk r_651 + -> Seq Scan on compress_hyper_14_1385_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_683_chunk r_652 + -> Seq Scan on compress_hyper_14_1386_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_684_chunk r_653 + -> Seq Scan on compress_hyper_14_1387_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_685_chunk r_654 + -> Seq Scan on compress_hyper_14_1388_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_686_chunk r_655 + -> Seq Scan on compress_hyper_14_1389_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_687_chunk r_656 + -> Seq Scan on compress_hyper_14_1390_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_688_chunk r_657 + -> Seq Scan on compress_hyper_14_1391_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_689_chunk r_658 + -> Seq Scan on compress_hyper_14_1392_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_690_chunk r_659 + -> Seq Scan on compress_hyper_14_1393_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_691_chunk r_660 + -> Seq Scan on compress_hyper_14_1394_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_692_chunk r_661 + -> Seq Scan on compress_hyper_14_1395_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_693_chunk r_662 + -> Seq Scan on compress_hyper_14_1396_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_694_chunk r_663 + -> Seq Scan on compress_hyper_14_1397_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_695_chunk r_664 + -> Seq Scan on compress_hyper_14_1398_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_696_chunk r_665 + -> Seq Scan on compress_hyper_14_1399_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_697_chunk r_666 + -> Seq Scan on compress_hyper_14_1400_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_698_chunk r_667 + -> Seq Scan on compress_hyper_14_1401_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_699_chunk r_668 + -> Seq Scan on compress_hyper_14_1402_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_700_chunk r_669 + -> Seq Scan on compress_hyper_14_1403_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_701_chunk r_670 + -> Seq Scan on compress_hyper_14_1404_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_702_chunk r_671 + -> Seq Scan on compress_hyper_14_1405_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_703_chunk r_672 + -> Seq Scan on compress_hyper_14_1406_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_704_chunk r_673 + -> Seq Scan on compress_hyper_14_1407_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_705_chunk r_674 + -> Seq Scan on compress_hyper_14_1408_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_706_chunk r_675 + -> Seq Scan on 
compress_hyper_14_1409_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_707_chunk r_676 + -> Seq Scan on compress_hyper_14_1410_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_708_chunk r_677 + -> Seq Scan on compress_hyper_14_1411_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_709_chunk r_678 + -> Seq Scan on compress_hyper_14_1412_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_710_chunk r_679 + -> Seq Scan on compress_hyper_14_1413_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_711_chunk r_680 + -> Seq Scan on compress_hyper_14_1414_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_712_chunk r_681 + -> Seq Scan on compress_hyper_14_1415_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_713_chunk r_682 + -> Seq Scan on compress_hyper_14_1416_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_714_chunk r_683 + -> Seq Scan on compress_hyper_14_1417_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_715_chunk r_684 + -> Seq Scan on compress_hyper_14_1418_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_716_chunk r_685 + -> Seq Scan on compress_hyper_14_1419_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_717_chunk r_686 + -> Seq Scan on compress_hyper_14_1420_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_718_chunk r_687 + -> Seq Scan on compress_hyper_14_1421_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_719_chunk r_688 + -> Seq Scan on compress_hyper_14_1422_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_720_chunk r_689 + -> Seq Scan on compress_hyper_14_1423_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_721_chunk r_690 + -> Seq Scan on compress_hyper_14_1424_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_722_chunk r_691 + -> Seq Scan on compress_hyper_14_1425_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_723_chunk r_692 + -> Seq Scan on compress_hyper_14_1426_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_724_chunk r_693 + -> Seq Scan on compress_hyper_14_1427_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_725_chunk r_694 + -> Seq Scan on compress_hyper_14_1428_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_726_chunk r_695 + -> Seq Scan on compress_hyper_14_1429_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_727_chunk r_696 + -> Seq Scan on compress_hyper_14_1430_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_728_chunk r_697 + -> Seq Scan on compress_hyper_14_1431_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_729_chunk r_698 + -> Seq Scan on compress_hyper_14_1432_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_730_chunk r_699 + -> Seq Scan on compress_hyper_14_1433_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_731_chunk r_700 + -> Seq Scan on compress_hyper_14_1434_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_732_chunk r_701 + -> Seq Scan on compress_hyper_14_1435_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_733_chunk r_702 + -> Seq Scan on compress_hyper_14_1436_chunk + -> Custom Scan (DecompressChunk) on _hyper_13_734_chunk r_703 + -> Seq Scan on compress_hyper_14_1437_chunk + -> Hash + -> Seq Scan on tags t +(1413 rows) + +-- run query with parallel enabled to ensure nothing is preventing parallel execution +-- this is just a sanity check, the result queries dont run with parallel disabled +SET max_parallel_workers_per_gather TO 4; +SET parallel_setup_cost = 0; +SET parallel_tuple_cost = 0; +SET min_parallel_table_scan_size TO '0'; +EXPLAIN (costs off) SELECT * FROM metrics ORDER BY time, device_id; + QUERY 
PLAN +------------------------------------------------------------------------ + Gather Merge + Workers Planned: 3 + -> Sort + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device_id + -> Parallel Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Parallel Seq Scan on compress_hyper_5_15_chunk + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Parallel Seq Scan on compress_hyper_5_16_chunk + -> Parallel Seq Scan on _hyper_1_2_chunk +(10 rows) + +EXPLAIN (costs off) SELECT time_bucket('10 minutes', time) bucket, avg(v0) avg_v0 FROM metrics GROUP BY bucket; + QUERY PLAN +-------------------------------------------------------------------------------------- + Finalize HashAggregate + Group Key: (time_bucket('@ 10 mins'::interval, _hyper_1_1_chunk."time")) + -> Gather + Workers Planned: 3 + -> Partial HashAggregate + Group Key: time_bucket('@ 10 mins'::interval, _hyper_1_1_chunk."time") + -> Result + -> Parallel Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Parallel Seq Scan on compress_hyper_5_15_chunk + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Parallel Seq Scan on compress_hyper_5_16_chunk + -> Parallel Seq Scan on _hyper_1_2_chunk +(13 rows) + +EXPLAIN (costs off) SELECT * FROM metrics_space ORDER BY time, device_id; + QUERY PLAN +------------------------------------------------------------------------ + Gather Merge + Workers Planned: 4 + -> Sort + Sort Key: _hyper_2_4_chunk."time", _hyper_2_4_chunk.device_id + -> Parallel Append + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk + -> Parallel Seq Scan on compress_hyper_6_17_chunk + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk + -> Parallel Seq Scan on compress_hyper_6_19_chunk + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk + -> Parallel Seq Scan on compress_hyper_6_20_chunk + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk + -> Parallel Seq Scan on compress_hyper_6_18_chunk + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk + -> Parallel Seq Scan on compress_hyper_6_21_chunk + -> Parallel Seq Scan on _hyper_2_8_chunk + -> Parallel Seq Scan on _hyper_2_7_chunk + -> Parallel Seq Scan on _hyper_2_9_chunk + -> Parallel Seq Scan on _hyper_2_12_chunk +(19 rows) + +RESET min_parallel_table_scan_size; +RESET parallel_setup_cost; +RESET parallel_tuple_cost; +SET enable_seqscan TO false; +-- should order compressed chunks using index +-- (we only EXPLAIN here b/c the resulting order is too inconsistent) +EXPLAIN (costs off) SELECT * FROM metrics WHERE time > '2000-01-08' ORDER BY device_id; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Merge Append + Sort Key: _hyper_1_2_chunk.device_id + -> Sort + Sort Key: _hyper_1_2_chunk.device_id + -> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + Vectorized Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk + Filter: (_ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(10 rows) + +EXPLAIN (costs off) SELECT * FROM metrics_space WHERE time > '2000-01-08' ORDER BY device_id; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Merge Append + Sort Key: 
_hyper_2_7_chunk.device_id + -> Index Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk + Vectorized Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk + Filter: (_ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk + Vectorized Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk + Filter: (_ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk + Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(18 rows) + +SET enable_seqscan TO true; +-- diff compressed and uncompressed results +:DIFF_CMD +-- check hypertable detection in views +CREATE VIEW ht_view AS +SELECT *, + 0 AS res +FROM metrics +UNION ALL +SELECT *, + 1 AS res +FROM metrics; +CREATE FUNCTION ht_func() RETURNS SETOF metrics LANGUAGE SQL STABLE AS +$sql$ + SELECT time, + device_id, + device_id_peer, + v0, v1, v2, v3 + FROM ht_view + WHERE res = 0; +$sql$; +-- should have decompresschunk node +:PREFIX SELECT * FROM ht_func(); + QUERY PLAN +------------------------------------------------------------------------------------ + Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(6 rows) + +\c +-- plan should be identical to previous plan in fresh session +:PREFIX SELECT * FROM ht_func(); + QUERY PLAN +------------------------------------------------------------------------------------ + Append (actual rows=6840 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(6 rows) + +-- repro for core dump related to total_table_pages setting that get +-- adjusted during decompress path. 
+CREATE SEQUENCE vessel_id_seq + INCREMENT 1 + START 1 MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; +CREATE TABLE motion_table( + id bigint NOT NULL DEFAULT nextval('vessel_id_seq'::regclass) , + datalogger_id bigint , + vessel_id bigint , + bus_id smallint , + src_id smallint , + dataloggertime timestamp with time zone , + interval_s real ); +CREATE INDEX +motion_table_t2_datalogger_id_idx on motion_table (datalogger_id); +CREATE INDEX motion_table_t2_dataloggertime_idx on motion_table(dataloggertime DESC); +CREATE INDEX motion_table_t2_vessel_id_idx on motion_table(vessel_id); +SELECT create_hypertable( 'motion_table', 'dataloggertime', chunk_time_interval=> '7 days'::interval); +NOTICE: adding not-null constraint to column "dataloggertime" + create_hypertable +---------------------------- + (15,public,motion_table,t) +(1 row) + +--- do not modify the data. We need at least this volume to reproduce issues with pages/tuple counts etc. --- +INSERT into motion_table(datalogger_id, vessel_id, bus_id, src_id, + dataloggertime, interval_s) +SELECT 1, random(), random() , random() , + generate_series( '2020-01-02 10:00'::timestamp, '2020-01-10 10::00'::timestamp, '1 min'::interval), 1.1; +INSERT into motion_table(datalogger_id, vessel_id, bus_id, src_id, + dataloggertime, interval_s) +SELECT 1, random(), 2, 3, + generate_series( '2020-01-10 8:00'::timestamp, '2020-01-10 10::00'::timestamp, '1 min'::interval), 1.1; +ALTER TABLE motion_table SET ( timescaledb.compress, + timescaledb.compress_segmentby = 'vessel_id, datalogger_id, bus_id, src_id' , timescaledb.compress_orderby = 'dataloggertime' ); +--have 2 chunks -- +SELECT COUNT(*) +FROM timescaledb_information.chunks +WHERE hypertable_name = 'motion_table'; + count +------- + 2 +(1 row) + +-- compress only the first one --- +SELECT compress_chunk( chunk_table) +FROM ( SELECT chunk_schema || '.' || chunk_name as chunk_table + FROM timescaledb_information.chunks + WHERE hypertable_name = 'motion_table' ORDER BY range_start limit 1 ) q; + compress_chunk +-------------------------------------------- + _timescaledb_internal._hyper_15_1438_chunk +(1 row) + +--call to decompress chunk on 1 of the chunks +SELECT count(*) from motion_table; + count +------- + 11642 +(1 row) + +--END of test for page settings diff --git a/tsl/test/expected/transparent_decompression_ordered_index-16.out b/tsl/test/expected/transparent_decompression_ordered_index-16.out new file mode 100644 index 00000000000..4ce20a80bd0 --- /dev/null +++ b/tsl/test/expected/transparent_decompression_ordered_index-16.out @@ -0,0 +1,1189 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set TEST_BASE_NAME transparent_decompression_ordered_index +SELECT format('include/%s_query.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME", + format('%s/results/%s_results_uncompressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_UNCOMPRESSED", + format('%s/results/%s_results_compressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_COMPRESSED" \gset +SELECT format('\!
diff %s %s', :'TEST_RESULTS_UNCOMPRESSED', :'TEST_RESULTS_COMPRESSED') AS "DIFF_CMD" \gset +-- disable memoize node to make EXPLAIN output comparable between PG14 and previous versions +SELECT CASE WHEN current_setting('server_version_num')::int/10000 >= 14 THEN set_config('enable_memoize','off',false) ELSE 'off' END AS enable_memoize; + enable_memoize +---------------- + off +(1 row) + +-- Testing Index Scan backwards ---- +-- We want more than 1 segment in at least 1 of the chunks +CREATE TABLE metrics_ordered_idx ( + time timestamptz NOT NULL, + device_id int, + device_id_peer int, + v0 int +); +SELECT create_hypertable ('metrics_ordered_idx', 'time', chunk_time_interval => '2days'::interval); + create_hypertable +---------------------------------- + (1,public,metrics_ordered_idx,t) +(1 row) + +ALTER TABLE metrics_ordered_idx SET (timescaledb.compress, timescaledb.compress_orderby = 'time ASC', timescaledb.compress_segmentby = 'device_id,device_id_peer'); +INSERT INTO metrics_ordered_idx (time, device_id, device_id_peer, v0) +SELECT time, + device_id, + 0, + device_id +FROM generate_series('2000-01-13 0:00:00+0'::timestamptz, '2000-01-15 23:55:00+0', '15m') gtime (time), + generate_series(1, 5, 1) gdevice (device_id); +INSERT INTO metrics_ordered_idx (time, device_id, device_id_peer, v0) +SELECT generate_series('2000-01-20 0:00:00+0'::timestamptz, '2000-01-20 11:55:00+0', '15m'), + 3, + 3, + generate_series(1, 5, 1); +INSERT INTO metrics_ordered_idx (time, device_id, device_id_peer, v0) +SELECT generate_series('2018-01-20 0:00:00+0'::timestamptz, '2018-01-20 11:55:00+0', '15m'), + 4, + 5, + generate_series(1, 5, 1); +INSERT INTO metrics_ordered_idx (time, device_id, device_id_peer, v0) +SELECT '2020-01-01 0:00:00+0', + generate_series(4, 7, 1), + 5, + generate_series(1, 5, 1); +-- missing values device_id = 7 +CREATE TABLE device_tbl ( + device_id int, + descr text +); +INSERT INTO device_tbl +SELECT generate_series(1, 6, 1), + 'devicex'; +INSERT INTO device_tbl +SELECT 8, + 'device8'; +ANALYZE device_tbl; +-- table for joins --- +CREATE TABLE nodetime ( + node int, + start_time timestamp, + stop_time timestamp +); +INSERT INTO nodetime + VALUES (4, '2018-01-06 00:00'::timestamp, '2018-12-02 12:00'::timestamp); +-- run queries on uncompressed hypertable and store result +\set PREFIX '' +\set PREFIX_VERBOSE '' +\set ECHO none +--compress all chunks for metrics_ordered_idx table -- +SELECT compress_chunk (c.schema_name || '.' || c.table_name) +FROM _timescaledb_catalog.chunk c + INNER JOIN _timescaledb_catalog.hypertable ht ON c.hypertable_id = ht.id +WHERE ht.table_name = 'metrics_ordered_idx' +ORDER BY c.id; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk + _timescaledb_internal._hyper_1_5_chunk +(5 rows) + +-- reindexing compressed hypertable to update statistics +DO +$$ +DECLARE + hyper_id int; +BEGIN + SELECT h.compressed_hypertable_id + INTO hyper_id + FROM _timescaledb_catalog.hypertable h + WHERE h.table_name = 'metrics_ordered_idx'; + EXECUTE format('REINDEX TABLE _timescaledb_internal._compressed_hypertable_%s', + hyper_id); +END; +$$; +-- run queries on compressed hypertable and store result +\set PREFIX '' +\set PREFIX_VERBOSE '' +\set ECHO none +-- diff compressed and uncompressed results +:DIFF_CMD +-- This is to illustrate that we have some null device_id values.
This fact +-- might influence the runtime chunk exclusion when doing joins on device_id. +select count(*) from metrics_ordered_idx +where extract(minute from time) = 0 and device_id is null +; + count +------- + 1 +(1 row) + +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_VERBOSE 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)' +-- we disable parallelism here otherwise EXPLAIN ANALYZE output +-- will be not stable and differ depending on worker assignment +SET max_parallel_workers_per_gather TO 0; +SET enable_seqscan = FALSE; +-- get explain for queries on hypertable with compression +\ir include/transparent_decompression_ordered_indexplan.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- tests for explain plan only -- +---check index backward scans instead of seq scans ------------ +CREATE TABLE metrics_ordered_idx2(time timestamptz NOT NULL, device_id int, device_id_peer int, v0 int, v1 int); +SELECT create_hypertable('metrics_ordered_idx2','time', chunk_time_interval=>'2days'::interval); + create_hypertable +----------------------------------- + (3,public,metrics_ordered_idx2,t) +(1 row) + +ALTER TABLE metrics_ordered_idx2 SET (timescaledb.compress, timescaledb.compress_orderby='time ASC, v0 desc',timescaledb.compress_segmentby='device_id,device_id_peer'); +INSERT INTO metrics_ordered_idx2(time,device_id,device_id_peer,v0, v1) SELECT generate_series('2000-01-20 0:00:00+0'::timestamptz,'2000-01-20 11:55:00+0','10s') , 3, 3, generate_series(1,5,1) , generate_series(555,559,1); +SELECT + compress_chunk(c.schema_name || '.' || c.table_name) +FROM _timescaledb_catalog.chunk c + INNER JOIN _timescaledb_catalog.hypertable ht ON c.hypertable_id=ht.id +WHERE ht.table_name = 'metrics_ordered_idx2' +ORDER BY c.id; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_3_11_chunk +(1 row) + +--all queries have only prefix of compress_orderby in ORDER BY clause +-- should have ordered DecompressChunk path because segmentby columns have equality constraints +:PREFIX SELECT * FROM metrics_ordered_idx2 WHERE device_id = 3 AND device_id_peer = 3 ORDER BY time DESC LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_3_11_chunk (actual rows=10 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_4_12_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using compress_hyper_4_12_chunk__compressed_hypertable_4_device_id_de on compress_hyper_4_12_chunk (actual rows=5 loops=1) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) +(7 rows) + +:PREFIX SELECT * FROM metrics_ordered_idx2 WHERE device_id = 3 AND device_id_peer = 3 ORDER BY time DESC , v0 asc LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_3_11_chunk (actual rows=10 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_4_12_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using 
compress_hyper_4_12_chunk__compressed_hypertable_4_device_id_de on compress_hyper_4_12_chunk (actual rows=5 loops=1) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) +(7 rows) + +:PREFIX SELECT * FROM metrics_ordered_idx2 WHERE device_id = 3 AND device_id_peer = 3 ORDER BY time DESC , v0 desc LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_3_11_chunk."time" DESC, _hyper_3_11_chunk.v0 DESC + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_3_11_chunk (actual rows=4291 loops=1) + -> Index Scan using compress_hyper_4_12_chunk__compressed_hypertable_4_device_id_de on compress_hyper_4_12_chunk (actual rows=5 loops=1) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) +(7 rows) + +:PREFIX SELECT d.device_id, m.time, m.time + FROM metrics_ordered_idx2 d INNER JOIN LATERAL (SELECT * FROM metrics_ordered_idx2 m WHERE m.device_id=d.device_id AND m.device_id_peer = 3 ORDER BY time DESC LIMIT 1 ) m ON m.device_id_peer = d.device_id_peer; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=4291 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_3_11_chunk d (actual rows=4291 loops=1) + -> Seq Scan on compress_hyper_4_12_chunk (actual rows=5 loops=1) + -> Subquery Scan on m (actual rows=1 loops=4291) + Filter: (d.device_id_peer = m.device_id_peer) + -> Limit (actual rows=1 loops=4291) + -> Result (actual rows=1 loops=4291) + -> Custom Scan (ChunkAppend) on metrics_ordered_idx2 m_1 (actual rows=1 loops=4291) + Order: m_1."time" DESC + Hypertables excluded during runtime: 0 + -> Custom Scan (DecompressChunk) on _hyper_3_11_chunk m_2 (actual rows=1 loops=4291) + -> Index Scan Backward using compress_hyper_4_12_chunk__compressed_hypertable_4_device_id_de on compress_hyper_4_12_chunk compress_hyper_4_12_chunk_1 (actual rows=1 loops=4291) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) +(13 rows) + +SET enable_seqscan = FALSE; +\ir include/transparent_decompression_ordered_index.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
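+-- (Editor's aside, not part of the captured regression output: the metrics_ordered_idx2
+-- queries above illustrate when DecompressChunk can emit rows without a separate Sort.
+-- With compress_segmentby = 'device_id,device_id_peer' and
+-- compress_orderby = 'time ASC, v0 desc', an ORDER BY that is a prefix of the orderby
+-- columns (or its exact reverse, e.g. time DESC, v0 ASC) is satisfied directly once
+-- every segmentby column is pinned by an equality predicate, as in
+--   SELECT * FROM metrics_ordered_idx2
+--   WHERE device_id = 3 AND device_id_peer = 3
+--   ORDER BY time DESC LIMIT 10;
+-- whereas ORDER BY time DESC, v0 DESC does not match the compression order and falls
+-- back to an explicit top-N Sort, as the plans above show.)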
+SET work_mem TO '50MB'; +---Lets test for index backward scans instead of seq scans ------------ +-- for ordered append tests on compressed chunks we need a hypertable with time as compress_orderby column +-- should not have ordered DecompressChunk path because segmentby columns are not part of pathkeys +:PREFIX +SELECT * +FROM ( + SELECT * + FROM metrics_ordered_idx + ORDER BY time DESC + LIMIT 10) AS q +ORDER BY 1, + 2, + 3, + 4; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=10 loops=1) + Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0 + Sort Method: quicksort + -> Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_ordered_idx (actual rows=10 loops=1) + Order: metrics_ordered_idx."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_2_8_chunk._ts_meta_max_1 DESC + -> Seq Scan on compress_hyper_2_8_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC + -> Seq Scan on compress_hyper_2_7_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_2_6_chunk._ts_meta_max_1 DESC + -> Seq Scan on compress_hyper_2_6_chunk (never executed) +(28 rows) + +-- should have ordered DecompressChunk path because segmentby columns have equality constraints +:PREFIX +SELECT * +FROM ( + SELECT * + FROM metrics_ordered_idx + WHERE device_id = 3 + AND device_id_peer = 3 + ORDER BY time DESC + LIMIT 10) AS q +ORDER BY 1, + 2, + 3, + 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=10 loops=1) + Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0 + Sort Method: quicksort + -> Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on metrics_ordered_idx (actual rows=10 loops=1) + Order: metrics_ordered_idx."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: compress_hyper_2_10_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=0 loops=1) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: compress_hyper_2_9_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using 
compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=0 loops=1) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=10 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_8_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_2_7_chunk._ts_meta_sequence_num DESC + -> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (never executed) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_2_6_chunk._ts_meta_sequence_num DESC + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (never executed) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) +(34 rows) + +:PREFIX SELECT DISTINCT ON (d.device_id) + * +FROM metrics_ordered_idx d + INNER JOIN LATERAL ( + SELECT * + FROM metrics_ordered_idx m + WHERE m.device_id = d.device_id + AND m.device_id_peer = 3 + ORDER BY time DESC + LIMIT 1) m ON m.device_id_peer = d.device_id_peer +WHERE extract(minute FROM d.time) = 0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Unique (actual rows=1 loops=1) + -> Nested Loop (actual rows=12 loops=1) + -> Custom Scan (ConstraintAwareAppend) (actual rows=389 loops=1) + Hypertable: metrics_ordered_idx + Chunks excluded during startup: 0 + -> Merge Append (actual rows=389 loops=1) + Sort Key: d_1.device_id + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk d_1 (actual rows=120 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 360 + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk d_2 (actual rows=240 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 720 + -> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk d_3 (actual rows=12 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 36 + -> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk d_4 (actual rows=12 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 36 + -> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk d_5 (actual rows=5 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + -> Index Scan using 
compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=5 loops=1) + -> Subquery Scan on m (actual rows=0 loops=389) + Filter: (d.device_id_peer = m.device_id_peer) + Rows Removed by Filter: 0 + -> Limit (actual rows=0 loops=389) + -> Custom Scan (ChunkAppend) on metrics_ordered_idx m_1 (actual rows=0 loops=389) + Order: m_1."time" DESC + Hypertables excluded during runtime: 0 + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk m_2 (actual rows=0 loops=388) + -> Index Scan Backward using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk compress_hyper_2_10_chunk_1 (actual rows=0 loops=388) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk m_3 (actual rows=0 loops=388) + -> Index Scan Backward using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk compress_hyper_2_9_chunk_1 (actual rows=0 loops=388) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m_4 (actual rows=0 loops=388) + -> Index Scan Backward using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk compress_hyper_2_8_chunk_1 (actual rows=0 loops=388) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk m_5 (actual rows=0 loops=304) + -> Index Scan Backward using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk compress_hyper_2_7_chunk_1 (actual rows=0 loops=304) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m_6 (actual rows=0 loops=304) + -> Index Scan Backward using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk compress_hyper_2_6_chunk_1 (actual rows=0 loops=304) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) +(48 rows) + +:PREFIX +SELECT d.device_id, + m.time, + m.time +FROM metrics_ordered_idx d + INNER JOIN LATERAL ( + SELECT * + FROM metrics_ordered_idx m + WHERE m.device_id = d.device_id + AND m.device_id_peer = 3 + ORDER BY time DESC + LIMIT 1) m ON m.device_id_peer = d.device_id_peer +WHERE extract(minute FROM d.time) = 0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop (actual rows=12 loops=1) + -> Custom Scan (ChunkAppend) on metrics_ordered_idx d (actual rows=389 loops=1) + Chunks excluded during startup: 0 + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk d_1 (actual rows=120 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 360 + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk d_2 (actual rows=240 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 720 + -> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk d_3 (actual rows=12 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 36 + -> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk d_4 (actual 
rows=12 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + Rows Removed by Filter: 36 + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk d_5 (actual rows=5 loops=1) + Filter: (EXTRACT(minute FROM "time") = '0'::numeric) + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1) + -> Subquery Scan on m (actual rows=0 loops=389) + Filter: (d.device_id_peer = m.device_id_peer) + Rows Removed by Filter: 0 + -> Limit (actual rows=0 loops=389) + -> Result (actual rows=0 loops=389) + -> Custom Scan (ChunkAppend) on metrics_ordered_idx m_1 (actual rows=0 loops=389) + Order: m_1."time" DESC + Hypertables excluded during runtime: 0 + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk m_2 (actual rows=0 loops=388) + -> Index Scan Backward using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk compress_hyper_2_10_chunk_1 (actual rows=0 loops=388) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk m_3 (actual rows=0 loops=388) + -> Index Scan Backward using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk compress_hyper_2_9_chunk_1 (actual rows=0 loops=388) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m_4 (actual rows=0 loops=388) + -> Index Scan Backward using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk compress_hyper_2_8_chunk_1 (actual rows=0 loops=388) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk m_5 (actual rows=0 loops=304) + -> Index Scan Backward using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk compress_hyper_2_7_chunk_1 (actual rows=0 loops=304) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m_6 (actual rows=0 loops=304) + -> Index Scan Backward using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk compress_hyper_2_6_chunk_1 (actual rows=0 loops=304) + Index Cond: ((device_id = d.device_id) AND (device_id_peer = 3)) +(45 rows) + +--github issue 1558 +SET enable_seqscan = FALSE; +SET enable_bitmapscan = FALSE; +SET max_parallel_workers_per_gather = 0; +SET enable_hashjoin = FALSE; +SET enable_mergejoin = FALSE; +:PREFIX +SELECT device_id, + count(*) +FROM ( + SELECT * + FROM metrics_ordered_idx mt, + nodetime nd + WHERE mt.time > nd.start_time + AND mt.device_id = nd.node + AND mt.time < nd.stop_time) AS subq +GROUP BY device_id; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=1 loops=1) + Group Key: mt_1.device_id + -> Nested Loop (actual rows=48 loops=1) + Join Filter: ((mt_1."time" > nd.start_time) AND (mt_1."time" < nd.stop_time) AND (mt_1.device_id = nd.node)) + Rows Removed by Join Filter: 1493 + -> Merge Append (actual rows=1541 loops=1) + Sort Key: mt_1.device_id + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1) + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on 
_hyper_1_2_chunk mt_2 (actual rows=960 loops=1) + -> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1) + -> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1) + -> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1) + -> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=5 loops=1) + -> Materialize (actual rows=1 loops=1541) + -> Seq Scan on nodetime nd (actual rows=1 loops=1) +(19 rows) + +:PREFIX +SELECT nd.node, + mt.* +FROM metrics_ordered_idx mt, + nodetime nd +WHERE mt.time > nd.start_time + AND mt.device_id = nd.node + AND mt.time < nd.stop_time +ORDER BY time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=48 loops=1) + Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time) AND (mt.device_id = nd.node)) + Rows Removed by Join Filter: 1493 + -> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1) + Order: mt."time" + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=5 loops=1) + -> Materialize (actual rows=1 loops=1541) + -> Seq Scan on nodetime nd (actual rows=1 loops=1) +(32 rows) + +SET enable_seqscan = TRUE; +SET enable_bitmapscan = TRUE; +SET enable_seqscan = TRUE; +SET enable_bitmapscan = TRUE; +SET max_parallel_workers_per_gather = 0; +SET 
enable_mergejoin = TRUE; +SET enable_hashjoin = FALSE; +:PREFIX +SELECT nd.node, + mt.* +FROM metrics_ordered_idx mt, + nodetime nd +WHERE mt.time > nd.start_time + AND mt.device_id = nd.node + AND mt.time < nd.stop_time +ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort (actual rows=48 loops=1) + Sort Key: mt."time" + Sort Method: quicksort + -> Merge Join (actual rows=48 loops=1) + Merge Cond: (nd.node = mt.device_id) + Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time)) + Rows Removed by Join Filter: 289 + -> Sort (actual rows=1 loops=1) + Sort Key: nd.node + Sort Method: quicksort + -> Seq Scan on nodetime nd (actual rows=1 loops=1) + -> Sort (actual rows=1250 loops=1) + Sort Key: mt.device_id + Sort Method: quicksort + -> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1) + Order: mt."time" + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1) +(41 rows) + +SET enable_mergejoin = FALSE; +SET enable_hashjoin = TRUE; +:PREFIX +SELECT nd.node, + mt.* +FROM metrics_ordered_idx mt, + nodetime nd +WHERE mt.time > nd.start_time + AND mt.device_id = nd.node + AND mt.time < nd.stop_time +ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Sort (actual rows=48 loops=1) + Sort Key: mt_1."time" + Sort Method: quicksort + -> Hash Join (actual rows=48 loops=1) + Hash Cond: (mt_1.device_id = nd.node) + Join Filter: ((mt_1."time" > nd.start_time) AND (mt_1."time" < nd.stop_time)) + Rows Removed by Join Filter: 289 + -> Append (actual rows=1541 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1) + -> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1) + -> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1) + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1) 
+ -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1) + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1) + -> Hash (actual rows=1 loops=1) + Buckets: 2048 Batches: 1 + -> Seq Scan on nodetime nd (actual rows=1 loops=1) +(21 rows) + +--enable all joins after the tests +SET enable_mergejoin = TRUE; +SET enable_hashjoin = TRUE; +--end github issue 1558 +-- github issue 2673 +-- nested loop join with parameterized path +-- join condition has a segment by column and another column. +SET enable_hashjoin = false; +SET enable_mergejoin=false; +SET enable_material = false; +SET enable_seqscan = false; +-- restrict so that we select only 1 chunk. +:PREFIX +WITH lookup as ( SELECT * from (values( 3, 5) , (3, 4) ) as lu( did, version) ) +SELECT met.*, lookup.* +FROM metrics_ordered_idx met join lookup +ON met.device_id = lookup.did and met.v0 = lookup.version +WHERE met.time > '2000-01-19 19:00:00-05' + and met.time < '2000-01-20 20:00:00-05'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=2 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk met (actual rows=1 loops=2) + Filter: ("*VALUES*".column2 = v0) + Rows Removed by Filter: 47 + Vectorized Filter: (("time" > 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Thu Jan 20 17:00:00 2000 PST'::timestamp with time zone)) + -> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=2) + Index Cond: (device_id = "*VALUES*".column1) + Filter: ((_ts_meta_max_1 > 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < 'Thu Jan 20 17:00:00 2000 PST'::timestamp with time zone)) +(9 rows) + +--add filter to segment by (device_id) and compressed attr column (v0) +:PREFIX +WITH lookup as ( SELECT * from (values( 3, 5) , (3, 4) ) as lu( did, version) ) +SELECT met.*, lookup.* +FROM metrics_ordered_idx met join lookup +ON met.device_id = lookup.did and met.v0 = lookup.version +WHERE met.time > '2000-01-19 19:00:00-05' + and met.time < '2000-01-20 20:00:00-05' + and met.device_id = 3 and met.v0 = 5; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=1 loops=1) + -> Values Scan on "*VALUES*" (actual rows=1 loops=1) + Filter: ((column1 = 3) AND (column2 = 5)) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk met (actual rows=1 loops=1) + Vectorized Filter: (("time" > 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Thu Jan 20 17:00:00 2000 PST'::timestamp with time zone) AND (v0 = 5)) + Rows Removed by Filter: 47 + -> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 3) + Filter: ((_ts_meta_max_1 > 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < 'Thu Jan 20 17:00:00 2000 PST'::timestamp with time zone)) +(10 rows) + +:PREFIX +WITH lookup as ( SELECT * from (values( 3, 5) , (3, 4) ) as lu( did, version) ) +SELECT met.*, lookup.* +FROM metrics_ordered_idx met join lookup +ON met.device_id = 
lookup.did and met.v0 = lookup.version +WHERE met.time = '2000-01-19 19:00:00-05' + and met.device_id = 3 + and met.device_id_peer = 3 and met.v0 = 5; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=0 loops=1) + -> Values Scan on "*VALUES*" (actual rows=1 loops=1) + Filter: ((column1 = 3) AND (column2 = 5)) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk met (actual rows=0 loops=1) + Vectorized Filter: ((v0 = 5) AND ("time" = 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone)) + Rows Removed by Filter: 48 + -> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1) + Index Cond: ((device_id = 3) AND (device_id_peer = 3)) + Filter: ((_ts_meta_min_1 <= 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 >= 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone)) +(10 rows) + +-- lateral subquery +:PREFIX +WITH f1 as ( SELECT * from (values( 7, 5, 4) , (4, 5, 5) ) as lu( device_id, device_id_peer, v0) ) +SELECT * FROM metrics_ordered_idx met +JOIN LATERAL + ( SELECT node, f1.* from nodetime , f1 + WHERE node = f1.device_id) q +ON met.device_id = q.node and met.device_id_peer = q.device_id_peer + and met.v0 = q.v0 and met.v0 > 2 and time = '2018-01-19 20:00:00-05'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=1 loops=1) + Join Filter: (nodetime.node = met.device_id) + -> Nested Loop (actual rows=1 loops=1) + Join Filter: (nodetime.node = "*VALUES*".column1) + Rows Removed by Join Filter: 1 + -> Seq Scan on nodetime (actual rows=1 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk met (actual rows=1 loops=1) + Filter: ("*VALUES*".column3 = v0) + Rows Removed by Filter: 47 + Vectorized Filter: ((v0 > 2) AND ("time" = 'Fri Jan 19 17:00:00 2018 PST'::timestamp with time zone)) + -> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1) + Index Cond: ((device_id = "*VALUES*".column1) AND (device_id_peer = "*VALUES*".column2)) + Filter: ((_ts_meta_min_1 <= 'Fri Jan 19 17:00:00 2018 PST'::timestamp with time zone) AND (_ts_meta_max_1 >= 'Fri Jan 19 17:00:00 2018 PST'::timestamp with time zone)) +(14 rows) + +-- filter on compressed attr (v0) with seqscan enabled and indexscan +-- disabled. filters on compressed attr should be above the seq scan. 
+SET enable_seqscan = true; +SET enable_indexscan = false; +:PREFIX +WITH lookup as ( SELECT * from (values( 3, 5) , (3, 4) ) as lu( did, version) ) +SELECT met.*, lookup.* +FROM metrics_ordered_idx met join lookup +ON met.device_id = lookup.did and met.v0 = lookup.version + and met.device_id = 3 +WHERE met.time > '2000-01-19 19:00:00-05' + and met.time < '2000-01-20 20:00:00-05' + and met.device_id = 3 + and met.device_id_peer = 3 and met.v0 = 5; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop (actual rows=1 loops=1) + -> Values Scan on "*VALUES*" (actual rows=1 loops=1) + Filter: ((column1 = 3) AND (column2 = 5)) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk met (actual rows=1 loops=1) + Vectorized Filter: (("time" > 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Thu Jan 20 17:00:00 2000 PST'::timestamp with time zone) AND (v0 = 5)) + Rows Removed by Filter: 47 + -> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1) + Filter: ((_ts_meta_max_1 > 'Wed Jan 19 16:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < 'Thu Jan 20 17:00:00 2000 PST'::timestamp with time zone) AND (device_id = 3) AND (device_id_peer = 3)) +(9 rows) + +RESET enable_hashjoin ; +RESET enable_mergejoin; +RESET enable_material ; +RESET enable_indexscan ; +--end github issue 2673 +SET enable_seqscan = TRUE; +\ir include/transparent_decompression_constraintaware.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
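+-- (Editor's aside, not part of the captured regression output: the tests below run with
+-- SET timescaledb.enable_chunk_append TO FALSE, so chunk exclusion is handled by the
+-- ConstraintAwareAppend node instead of ChunkAppend. Exclusion is deferred to executor
+-- startup for predicates that are only stable at run time, such as time < now() or a
+-- comparison against '2000-01-01 0:00:00+0'::text::timestamptz; the effect is reported
+-- in EXPLAIN as "Chunks excluded during startup". A minimal illustrative predicate of
+-- that kind:
+--   SELECT count(*) FROM metrics_ordered_idx
+--   WHERE time < '2000-01-01 0:00:00+0'::text::timestamptz;
+-- )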
+--- TEST for constraint aware append ------------ +--should select only newly added chunk -- +SET timescaledb.enable_chunk_append TO FALSE; +:PREFIX +SELECT * +FROM ( + SELECT * + FROM metrics_ordered_idx + WHERE time > '2002-01-01' + AND time < now() + ORDER BY time DESC + LIMIT 10) AS q +ORDER BY 1, + 2, + 3, + 4; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=10 loops=1) + Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0 + Sort Method: quicksort + -> Limit (actual rows=10 loops=1) + -> Custom Scan (ConstraintAwareAppend) (actual rows=10 loops=1) + Hypertable: metrics_ordered_idx + Chunks excluded during startup: 0 + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_1_4_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1) + Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now())) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1) + Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now())) + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) +(23 rows) + +-- DecompressChunk path because segmentby columns have equality constraints +:PREFIX +SELECT * +FROM ( + SELECT * + FROM metrics_ordered_idx + WHERE device_id = 4 + AND device_id_peer = 5 + AND time > '2002-01-01' + AND time < now() + ORDER BY time DESC + LIMIT 10) AS q +ORDER BY 1, + 2, + 3, + 4; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=10 loops=1) + Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0 + Sort Method: quicksort + -> Limit (actual rows=10 loops=1) + -> Custom Scan (ConstraintAwareAppend) (actual rows=10 loops=1) + Hypertable: metrics_ordered_idx + Chunks excluded during startup: 0 + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_1_4_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=9 loops=1) + Filter: ("time" < now()) + Vectorized Filter: ("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_9_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1) + Filter: ((_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND (device_id = 4) AND (device_id_peer = 5)) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=1 loops=1) + Filter: ("time" < now()) + Vectorized Filter: ("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_2_10_chunk._ts_meta_sequence_num 
DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=1 loops=1) + Filter: ((_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND (device_id = 4) AND (device_id_peer = 5)) + Rows Removed by Filter: 4 +(26 rows) + +:PREFIX +SELECT m.device_id, + d.v0, + count(*) +FROM metrics_ordered_idx d, + metrics_ordered_idx m +WHERE m.device_id = d.device_id + AND m.device_id_peer = 5 + AND m.time = d.time + AND m.time > '2002-01-01' + AND m.time < '2000-01-01 0:00:00+0'::text::timestamptz + AND m.device_id_peer = d.device_id_peer +GROUP BY m.device_id, + d.v0 +ORDER BY 1, + 2, + 3; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort (actual rows=0 loops=1) + Sort Key: m.device_id, d.v0, (count(*)) + Sort Method: quicksort + -> HashAggregate (actual rows=0 loops=1) + Group Key: m.device_id, d.v0 + Batches: 1 + -> Hash Join (actual rows=0 loops=1) + Hash Cond: ((d.device_id = m.device_id) AND (d."time" = m."time")) + -> Custom Scan (ConstraintAwareAppend) (actual rows=0 loops=1) + Hypertable: metrics_ordered_idx + Chunks excluded during startup: 2 + -> Hash (never executed) + -> Custom Scan (ConstraintAwareAppend) (never executed) + Hypertable: metrics_ordered_idx + Chunks excluded during startup: 2 +(15 rows) + +--query with no results -- +:PREFIX +SELECT m.device_id, + d.v0, + count(*) +FROM metrics_ordered_idx d, + metrics_ordered_idx m +WHERE m.time = d.time + AND m.time > '2000-01-01 0:00:00+0'::text::timestamptz +GROUP BY m.device_id, + d.v0 +ORDER BY 1, + 2, + 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=42 loops=1) + Sort Key: m.device_id, d.v0, (count(*)) + Sort Method: quicksort + -> HashAggregate (actual rows=42 loops=1) + Group Key: m.device_id, d.v0 + Batches: 1 + -> Merge Join (actual rows=7321 loops=1) + Merge Cond: (d."time" = m."time") + -> Sort (actual rows=1541 loops=1) + Sort Key: d."time" + Sort Method: quicksort + -> Custom Scan (ConstraintAwareAppend) (actual rows=1541 loops=1) + Hypertable: metrics_ordered_idx + Chunks excluded during startup: 0 + -> Append (actual rows=1541 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk d_1 (actual rows=480 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk d_2 (actual rows=960 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk d_3 (actual rows=48 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk d_4 (actual rows=48 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> 
Custom Scan (DecompressChunk) on _hyper_1_5_chunk d_5 (actual rows=5 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Sort (actual rows=7317 loops=1) + Sort Key: m."time" + Sort Method: quicksort + -> Custom Scan (ConstraintAwareAppend) (actual rows=1541 loops=1) + Hypertable: metrics_ordered_idx + Chunks excluded during startup: 0 + -> Append (actual rows=1541 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m_1 (actual rows=480 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_6_chunk compress_hyper_2_6_chunk_1 (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk m_2 (actual rows=960 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_7_chunk compress_hyper_2_7_chunk_1 (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m_3 (actual rows=48 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_8_chunk compress_hyper_2_8_chunk_1 (actual rows=1 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk m_4 (actual rows=48 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_9_chunk compress_hyper_2_9_chunk_1 (actual rows=1 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk m_5 (actual rows=5 loops=1) + Filter: ("time" > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_2_10_chunk compress_hyper_2_10_chunk_1 (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) +(62 rows) + +--query with all chunks but 1 excluded at plan time -- +:PREFIX +SELECT d.*, + m.* +FROM device_tbl d, + metrics_ordered_idx m +WHERE m.device_id = d.device_id + AND m.time > '2019-01-01' + AND m.time < '2000-01-01 0:00:00+0'::text::timestamptz +ORDER BY m.v0; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=0 loops=1) + Sort Key: m.v0 + Sort Method: quicksort + -> Hash Join (actual rows=0 loops=1) + Hash Cond: (m.device_id = d.device_id) + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk m (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" > 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone) AND (_ts_meta_min_1 < ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 5 + -> Hash (actual rows=7 loops=1) + Buckets: 1024 Batches: 1 + -> Seq Scan on device_tbl d 
(actual rows=7 loops=1) +(14 rows) + +-- no matches in metrics_ordered_idx but one row in device_tbl +:PREFIX +SELECT d.*, + m.* +FROM device_tbl d + LEFT OUTER JOIN metrics_ordered_idx m ON m.device_id = d.device_id + AND m.time > '2019-01-01' + AND m.time < '2000-01-01 0:00:00+0'::text::timestamptz +WHERE d.device_id = 8 +ORDER BY m.v0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort (actual rows=1 loops=1) + Sort Key: m.v0 + Sort Method: quicksort + -> Nested Loop Left Join (actual rows=1 loops=1) + Join Filter: (m.device_id = d.device_id) + -> Seq Scan on device_tbl d (actual rows=1 loops=1) + Filter: (device_id = 8) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk m (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" > 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone) AND (device_id = 8) AND (_ts_meta_min_1 < ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 5 +(14 rows) + +-- no matches in device_tbl but 1 row in metrics_ordered_idx +:PREFIX +SELECT d.*, + m.* +FROM device_tbl d + FULL OUTER JOIN metrics_ordered_idx m ON m.device_id = d.device_id + AND m.time > '2019-01-01' + AND m.time < '2000-01-01 0:00:00+0'::text::timestamptz +WHERE m.device_id = 7 +ORDER BY m.v0; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort (actual rows=1 loops=1) + Sort Key: m_1.v0 + Sort Method: quicksort + -> Hash Left Join (actual rows=1 loops=1) + Hash Cond: (m_1.device_id = d.device_id) + Join Filter: ((m_1."time" > 'Tue Jan 01 00:00:00 2019 PST'::timestamp with time zone) AND (m_1."time" < ('2000-01-01 0:00:00+0'::cstring)::timestamp with time zone)) + -> Append (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk m_1 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=0 loops=1) + Filter: (device_id = 7) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk m_2 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_2_7_chunk (actual rows=0 loops=1) + Filter: (device_id = 7) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk m_3 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_2_8_chunk (actual rows=0 loops=1) + Filter: (device_id = 7) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_1_4_chunk m_4 (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_2_9_chunk (actual rows=0 loops=1) + Filter: (device_id = 7) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_1_5_chunk m_5 (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_2_10_chunk (actual rows=1 loops=1) + Filter: (device_id = 7) + Rows Removed by Filter: 4 + -> Hash (actual rows=0 loops=1) + Buckets: 1024 Batches: 1 + -> Seq Scan on device_tbl d (actual rows=0 loops=1) + Filter: (device_id = 7) + Rows Removed by Filter: 7 +(32 rows) + +SET timescaledb.enable_chunk_append TO TRUE; +-- github bug 2917 with UNION ALL that references compressed 
ht +CREATE TABLE entity +( + oid bigint PRIMARY KEY, + type text, + name text +); +INSERT INTO entity values(10, 'VMEM', 'cpu'); +CREATE TABLE entity_m2 +( + timec timestamp with time zone NOT NULL, + entity_oid bigint , + entity_hash bigint , + type text , + current double precision, + capacity double precision, + utilization double precision, + peak double precision +); +SELECT create_hypertable('entity_m2', 'timec', chunk_time_interval=>'30 days'::interval); + create_hypertable +------------------------ + (5,public,entity_m2,t) +(1 row) + +INSERT INTO entity_m2 values ( + '2020-12-21 15:47:58.778-05' , 10 , -7792214420424674003 , 'VMEM' , 0, 2097152 , 0 , 100); +INSERT INTO entity_m2 values ( + '2020-12-21 16:47:58.778-05' , 10 , -7792214420424674003 , 'VMEM' , 0, 2097152 , 0 , 100); +ALTER TABLE entity_m2 SET (timescaledb.compress, +timescaledb.compress_segmentby = 'entity_oid', +timescaledb.compress_orderby = 'type, timec'); +SELECT compress_chunk(c) FROM show_chunks('entity_m2') c; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_5_13_chunk +(1 row) + +CREATE TABLE entity_m1 +( +timec timestamp with time zone , + entity_oid bigint , + entity_hash bigint , + type text , + current double precision, + capacity double precision, + utilization double precision +); +SELECT create_hypertable('entity_m1', 'timec', chunk_time_interval=>'30 days'::interval); +NOTICE: adding not-null constraint to column "timec" + create_hypertable +------------------------ + (7,public,entity_m1,t) +(1 row) + +INSERT INTO entity_m1 values ( + '2020-12-21 16:47:58.778-05' , 10 , -7792214420424674003 , 'VMEM' , 0, 100 , 0 ); +create view metric_view as + SELECT m2.timec, + m2.entity_oid, + m2.entity_hash, + m2.type, + m2.current, + m2.capacity, + m2.utilization, + m2.peak + FROM entity_m2 m2 +UNION ALL + SELECT m1.timec, + m1.entity_oid, + m1.entity_hash, + m1.type, + m1.current, + m1.capacity, + m1.utilization, + NULL::double precision AS peak + FROM entity_m1 m1; +SET enable_bitmapscan = false; +SET enable_hashjoin = false; +SET enable_mergejoin = false; +SELECT m.timec, avg(m.utilization) AS avg_util + FROM metric_view m, entity e + WHERE m.type = 'VMEM' + AND m.timec BETWEEN '2020-12-21T00:00:00'::timestamptz - interval '7 day' AND date_trunc('day', '2020-12-22T00:00:00'::timestamptz) + AND m.entity_oid = e.oid + GROUP BY 1 ORDER BY 1; + timec | avg_util +----------------------------------+---------- + Mon Dec 21 12:47:58.778 2020 PST | 0 + Mon Dec 21 13:47:58.778 2020 PST | 0 +(2 rows) + +--now compress the other table too and rerun the query -- +ALTER TABLE entity_m1 SET (timescaledb.compress, +timescaledb.compress_segmentby = 'entity_oid', +timescaledb.compress_orderby = 'type, timec'); +SELECT compress_chunk(c) FROM show_chunks('entity_m1') c; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_7_15_chunk +(1 row) + +SELECT m.timec, avg(m.utilization) AS avg_util + FROM metric_view m, entity e + WHERE m.type = 'VMEM' + AND m.timec BETWEEN '2020-12-21T00:00:00'::timestamptz - interval '7 day' AND date_trunc('day', '2020-12-22T00:00:00'::timestamptz) + AND m.entity_oid = e.oid + GROUP BY 1 ORDER BY 1; + timec | avg_util +----------------------------------+---------- + Mon Dec 21 12:47:58.778 2020 PST | 0 + Mon Dec 21 13:47:58.778 2020 PST | 0 +(2 rows) + +RESET enable_bitmapscan ; +RESET enable_hashjoin ; +RESET enable_mergejoin; +-- end github bug 2917 diff --git a/tsl/test/shared/expected/constify_now-16.out 
b/tsl/test/shared/expected/constify_now-16.out new file mode 100644 index 00000000000..c529cf1e310 --- /dev/null +++ b/tsl/test/shared/expected/constify_now-16.out @@ -0,0 +1,605 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +SET timescaledb.enable_chunk_append TO false; +SET timescaledb.enable_constraint_aware_append TO false; +SET timescaledb.current_timestamp_mock TO '1990-01-01'; +\set PREFIX 'EXPLAIN (COSTS OFF, SUMMARY OFF, TIMING OFF)' +-- create a test table +-- any query with successful now_constify will have 1 chunk while +-- others will have 2 chunks in plan +CREATE TABLE const_now(time timestamptz, time2 timestamptz, device_id int, value float); +SELECT table_name FROM create_hypertable('const_now','time'); +NOTICE: adding not-null constraint to column "time" + table_name + const_now +(1 row) + +INSERT INTO const_now SELECT '1000-01-01','1000-01-01',1,0.5; +INSERT INTO const_now SELECT '1000-01-01','1000-01-01',2,0.5; +INSERT INTO const_now SELECT '3000-01-01','3000-01-01',1,0.5; +INSERT INTO const_now SELECT '3000-01-01','3000-01-01',2,0.5; +-- test valid variants we are optimizing +-- all of these should have a constified value as filter +-- none of these initial tests will actually exclude chunks +-- because we want to see the constified now expression in +-- EXPLAIN output +:PREFIX SELECT FROM const_now WHERE time > now(); +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time >= now(); +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= now()) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time > now() - '24h'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 24 hours'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time > now() + '10m'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() + '@ 10 mins'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time >= now() - '10m'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (now() - '@ 10 mins'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time >= now() + '10m'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (now() + '@ 10 mins'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time > now() - '2d'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 2 days'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time > now() + '3d'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() + '@ 3 days'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time > now() - '1week'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 7 days'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time > now() - '1month'::interval; +QUERY PLAN + Index Only Scan using 
_hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) +(2 rows) + +:PREFIX SELECT FROM const_now WHERE time > CURRENT_TIMESTAMP; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > CURRENT_TIMESTAMP) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > CURRENT_TIMESTAMP) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time >= CURRENT_TIMESTAMP; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= CURRENT_TIMESTAMP) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= CURRENT_TIMESTAMP) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time > CURRENT_TIMESTAMP - '24h'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP - '@ 24 hours'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP - '@ 24 hours'::interval)) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time > CURRENT_TIMESTAMP + '10m'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP + '@ 10 mins'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP + '@ 10 mins'::interval)) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time >= CURRENT_TIMESTAMP - '10m'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (CURRENT_TIMESTAMP - '@ 10 mins'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (CURRENT_TIMESTAMP - '@ 10 mins'::interval)) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time >= CURRENT_TIMESTAMP + '10m'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (CURRENT_TIMESTAMP + '@ 10 mins'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (CURRENT_TIMESTAMP + '@ 10 mins'::interval)) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time > CURRENT_TIMESTAMP - '2d'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP - '@ 2 days'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP - '@ 2 days'::interval)) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time > CURRENT_TIMESTAMP + '3d'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP + '@ 3 days'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP + '@ 3 days'::interval)) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time > CURRENT_TIMESTAMP - '1week'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > 
(CURRENT_TIMESTAMP - '@ 7 days'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP - '@ 7 days'::interval)) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE time > CURRENT_TIMESTAMP - '1month'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (CURRENT_TIMESTAMP - '@ 1 mon'::interval)) +(5 rows) + +-- test bitmapheapscan +SET enable_indexscan TO false; +:PREFIX SELECT FROM const_now WHERE time > now(); +QUERY PLAN + Bitmap Heap Scan on _hyper_X_X_chunk + Recheck Cond: ("time" > now()) + -> Bitmap Index Scan on _hyper_X_X_chunk_const_now_time_idx + Index Cond: ("time" > now()) +(4 rows) + +RESET enable_indexscan; +-- test multiple constraints +:PREFIX SELECT FROM const_now WHERE time >= now() + '10m'::interval AND device_id = 2; +QUERY PLAN + Index Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (now() + '@ 10 mins'::interval)) + Filter: (device_id = 2) +(3 rows) + +:PREFIX SELECT FROM const_now WHERE time >= now() + '10m'::interval AND (device_id = 2 OR device_id = 3); +QUERY PLAN + Index Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" >= (now() + '@ 10 mins'::interval)) + Filter: ((device_id = 2) OR (device_id = 3)) +(3 rows) + +:PREFIX SELECT FROM const_now WHERE time >= now() + '10m'::interval AND time >= now() - '10m'::interval; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: (("time" >= (now() + '@ 10 mins'::interval)) AND ("time" >= (now() - '@ 10 mins'::interval))) +(2 rows) + +-- variants we don't optimize +:PREFIX SELECT FROM const_now WHERE time > now()::date; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now())::date) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now())::date) +(5 rows) + +:PREFIX SELECT FROM const_now WHERE round(EXTRACT(EPOCH FROM now())) > 0.5; +QUERY PLAN + Append + -> Result + One-Time Filter: (round(EXTRACT(epoch FROM now()), 0) > 0.5) + -> Seq Scan on _hyper_X_X_chunk + -> Result + One-Time Filter: (round(EXTRACT(epoch FROM now()), 0) > 0.5) + -> Seq Scan on _hyper_X_X_chunk +(7 rows) + +-- we only modify top-level ANDed now() expressions +:PREFIX SELECT FROM const_now WHERE time > now() - '1m'::interval OR time > now() + '1m'::interval; +QUERY PLAN + Append + -> Bitmap Heap Scan on _hyper_X_X_chunk + Recheck Cond: (("time" > (now() - '@ 1 min'::interval)) OR ("time" > (now() + '@ 1 min'::interval))) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_const_now_time_idx + Index Cond: ("time" > (now() - '@ 1 min'::interval)) + -> Bitmap Index Scan on _hyper_X_X_chunk_const_now_time_idx + Index Cond: ("time" > (now() + '@ 1 min'::interval)) + -> Bitmap Heap Scan on _hyper_X_X_chunk + Recheck Cond: (("time" > (now() - '@ 1 min'::interval)) OR ("time" > (now() + '@ 1 min'::interval))) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_const_now_time_idx + Index Cond: ("time" > (now() - '@ 1 min'::interval)) + -> Bitmap Index Scan on _hyper_X_X_chunk_const_now_time_idx + Index Cond: ("time" > (now() + '@ 1 min'::interval)) +(15 rows) + +:PREFIX 
SELECT FROM const_now WHERE device_id = 2 OR (time > now() - '1m'::interval AND time > now() + '1m'::interval); +QUERY PLAN + Append + -> Seq Scan on _hyper_X_X_chunk + Filter: ((device_id = 2) OR (("time" > (now() - '@ 1 min'::interval)) AND ("time" > (now() + '@ 1 min'::interval)))) + -> Seq Scan on _hyper_X_X_chunk + Filter: ((device_id = 2) OR (("time" > (now() - '@ 1 min'::interval)) AND ("time" > (now() + '@ 1 min'::interval)))) +(5 rows) + +-- CTE +:PREFIX WITH q1 AS ( + SELECT * FROM const_now WHERE time > now() +) SELECT FROM q1; +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(2 rows) + +:PREFIX WITH q1 AS ( + SELECT * FROM const_now +) SELECT FROM q1 WHERE time > now(); +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(5 rows) + +-- JOIN +:PREFIX SELECT FROM const_now m1, const_now m2 WHERE m1.time > now(); +QUERY PLAN + Nested Loop + -> Append + -> Seq Scan on _hyper_X_X_chunk m2_1 + -> Seq Scan on _hyper_X_X_chunk m2_2 + -> Materialize + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m1 + Index Cond: ("time" > now()) +(7 rows) + +:PREFIX SELECT FROM const_now m1, const_now m2 WHERE m2.time > now(); +QUERY PLAN + Nested Loop + -> Append + -> Seq Scan on _hyper_X_X_chunk m1_1 + -> Seq Scan on _hyper_X_X_chunk m1_2 + -> Materialize + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m2 + Index Cond: ("time" > now()) +(7 rows) + +:PREFIX SELECT FROM const_now m1, const_now m2 WHERE m1.time > now() AND m2.time > now(); +QUERY PLAN + Nested Loop + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m1 + Index Cond: ("time" > now()) + -> Materialize + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m2 + Index Cond: ("time" > now()) +(6 rows) + +-- only top-level constraints in WHERE clause are constified +:PREFIX SELECT FROM const_now m1 INNER JOIN const_now m2 ON (m1.time > now()); +QUERY PLAN + Nested Loop + -> Append + -> Seq Scan on _hyper_X_X_chunk m2_1 + -> Seq Scan on _hyper_X_X_chunk m2_2 + -> Materialize + -> Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m1_1 + Index Cond: ("time" > now()) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m1_2 + Index Cond: ("time" > now()) +(10 rows) + +:PREFIX SELECT FROM const_now m1 INNER JOIN const_now m2 ON (m1.time > now()) WHERE m2.time > now(); +QUERY PLAN + Nested Loop + -> Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m1_1 + Index Cond: ("time" > now()) + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m1_2 + Index Cond: ("time" > now()) + -> Materialize + -> Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk m2 + Index Cond: ("time" > now()) +(9 rows) + +-- test UPDATE +:PREFIX UPDATE const_now SET value = EXTRACT(EPOCH FROM now()) WHERE time > now(); +QUERY PLAN + Custom Scan (HypertableModify) + -> Update on const_now + Update on _hyper_X_X_chunk const_now_1 + -> Result + -> Index Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk const_now_1 + Index Cond: ("time" > now()) +(6 rows) + +-- test DELETE +:PREFIX DELETE FROM const_now WHERE 
time > now(); +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on const_now + Delete on _hyper_X_X_chunk const_now_1 + -> Index Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk const_now_1 + Index Cond: ("time" > now()) +(5 rows) + +-- test chunks actually get excluded +-- should exclude all +SET timescaledb.current_timestamp_mock TO '2010-01-01'; +:PREFIX SELECT FROM const_now WHERE time > now(); +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(2 rows) + +-- should exclude all but 1 chunk +SET timescaledb.current_timestamp_mock TO '2000-01-14'; +:PREFIX SELECT FROM const_now WHERE time > now(); +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(2 rows) + +-- should have one time filter false +:PREFIX SELECT FROM const_now WHERE time > now(); +QUERY PLAN + Index Only Scan using _hyper_X_X_chunk_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(2 rows) + +-- no constification because it's not partitioning column +:PREFIX SELECT FROM const_now WHERE time2 > now(); +QUERY PLAN + Append + -> Seq Scan on _hyper_X_X_chunk + Filter: (time2 > now()) + -> Seq Scan on _hyper_X_X_chunk + Filter: (time2 > now()) +(5 rows) + +DROP TABLE const_now; +-- test prepared statements +CREATE TABLE prep_const_now(time timestamptz, device int, value float); +SELECT table_name FROM create_hypertable('prep_const_now', 'time'); +NOTICE: adding not-null constraint to column "time" + table_name + prep_const_now +(1 row) + +INSERT INTO prep_const_now SELECT '3000-01-02', 1, 0.2; +INSERT INTO prep_const_now SELECT '3001-01-02', 2, 0.3; +INSERT INTO prep_const_now SELECT '3002-01-02', 3, 0.4; +SET timescaledb.current_timestamp_mock TO '3001-01-01'; +PREPARE p1 AS SELECT FROM prep_const_now WHERE time > now(); +:PREFIX EXECUTE p1; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_prep_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) + -> Index Only Scan using _hyper_X_X_chunk_prep_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(5 rows) + +EXECUTE p1; +(2 rows) + +SET timescaledb.current_timestamp_mock TO '3002-01-01'; +-- plan won't change cause the query didnt get replanned +:PREFIX EXECUTE p1; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_prep_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) + -> Index Only Scan using _hyper_X_X_chunk_prep_const_now_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > now()) +(5 rows) + +EXECUTE p1; +(2 rows) + +DROP TABLE prep_const_now; +-- test outer var references dont trip up constify_now +-- no optimization is done in this case +:PREFIX SELECT * FROM + metrics_tstz m1 + INNER JOIN metrics_tstz as m2 on (true) +WHERE + EXISTS (SELECT * FROM metrics_tstz AS m3 WHERE m2.time > now()); +QUERY PLAN + Nested Loop + -> Seq Scan on _hyper_X_X_chunk m1 + -> Materialize + -> Nested Loop Semi Join + -> Index Scan using _hyper_X_X_chunk_metrics_tstz_time_idx on _hyper_X_X_chunk m2 + Index Cond: ("time" > now()) + -> Materialize + -> Seq Scan on _hyper_X_X_chunk m3 +(8 rows) + +-- test dst interaction with day intervals +SET timezone TO 'Europe/Berlin'; +CREATE TABLE const_now_dst(time timestamptz not null); +SELECT table_name FROM create_hypertable('const_now_dst','time',chunk_time_interval:='30minutes'::interval); + table_name + const_now_dst +(1 row) + +-- create 2 chunks +INSERT INTO 
const_now_dst SELECT '2022-03-27 03:15:00+02'; +INSERT INTO const_now_dst SELECT '2022-03-27 03:45:00+02'; +SELECT * FROM const_now_dst WHERE time >= '2022-03-28 0:45+0'::timestamptz - '1d'::interval; + time + Sun Mar 27 03:45:00 2022 CEST +(1 row) + +SELECT * FROM const_now_dst WHERE time >= '2022-03-28 1:15+0'::timestamptz - '1d'::interval; + time + Sun Mar 27 03:15:00 2022 CEST + Sun Mar 27 03:45:00 2022 CEST +(2 rows) + +SET timescaledb.current_timestamp_mock TO '2022-03-28 0:45+0'; +-- must have 2 chunks in plan +:PREFIX SELECT FROM const_now_dst WHERE time > now() - '1day'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 day'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 day'::interval)) +(5 rows) + +SET timescaledb.current_timestamp_mock TO '2022-03-28 1:15+0'; +-- must have 2 chunks in plan +:PREFIX SELECT FROM const_now_dst WHERE time > now() - '1day'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 day'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 day'::interval)) +(5 rows) + +TRUNCATE const_now_dst; +SELECT set_chunk_time_interval('const_now_dst','1 day'::interval, 'time'); + set_chunk_time_interval + +(1 row) + +-- test month calculation safety buffer +SET timescaledb.current_timestamp_mock TO '2001-03-1 0:30:00+00'; +INSERT INTO const_now_dst SELECT generate_series('2001-01-28'::timestamptz, '2001-02-01', '1day'::interval); +set timezone to 'utc+1'; +-- must have 5 chunks in plan +:PREFIX SELECT * FROM const_now_dst WHERE time > now() - '1 month'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) +(11 rows) + +set timezone to 'utc-1'; +-- must have 5 chunks in plan +:PREFIX SELECT * FROM const_now_dst WHERE time > now() - '1 month'::interval; +QUERY PLAN + Append + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) + -> Index Only Scan using _hyper_X_X_chunk_const_now_dst_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 1 mon'::interval)) +(11 rows) + +DROP TABLE const_now_dst; +-- test 
now constification with VIEWs +SET timescaledb.current_timestamp_mock TO '2003-01-01 0:30:00+00'; +CREATE TABLE now_view_test(time timestamptz,device text, value float); +SELECT table_name FROM create_hypertable('now_view_test','time'); +NOTICE: adding not-null constraint to column "time" + table_name + now_view_test +(1 row) + +-- create 5 chunks +INSERT INTO now_view_test SELECT generate_series('2000-01-01'::timestamptz,'2004-01-01'::timestamptz,'1year'::interval), 'a', 0.5; +CREATE VIEW now_view AS SELECT time, device, avg(value) from now_view_test GROUP BY 1,2; +-- should have all 5 chunks in EXPLAIN +:PREFIX SELECT * FROM now_view; +QUERY PLAN + Finalize HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Append + -> Partial HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Seq Scan on _hyper_X_X_chunk + -> Partial HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Seq Scan on _hyper_X_X_chunk + -> Partial HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Seq Scan on _hyper_X_X_chunk + -> Partial HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Seq Scan on _hyper_X_X_chunk + -> Partial HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Seq Scan on _hyper_X_X_chunk +(18 rows) + +-- should have 2 chunks in EXPLAIN +:PREFIX SELECT * FROM now_view WHERE time > now() - '168h'::interval; +QUERY PLAN + Finalize HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Append + -> Partial HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Index Scan Backward using _hyper_X_X_chunk_now_view_test_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 168 hours'::interval)) + -> Partial HashAggregate + Group Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device + -> Index Scan Backward using _hyper_X_X_chunk_now_view_test_time_idx on _hyper_X_X_chunk + Index Cond: ("time" > (now() - '@ 168 hours'::interval)) +(11 rows) + +DROP TABLE now_view_test CASCADE; +NOTICE: drop cascades to view now_view +-- #4709 +-- test queries with constraints involving columns from different nesting levels +SELECT * FROM + (SELECT * FROM metrics m1 LIMIT 1) m1 + INNER JOIN (SELECT * FROM metrics m2 LIMIT 1) m2 ON true, + LATERAL (SELECT m2.time FROM devices LIMIT 1) as subq_1 +WHERE subq_1.time > m1.time; + time | device_id | v0 | v1 | v2 | v3 | time | device_id | v0 | v1 | v2 | v3 | time +------+-----------+----+----+----+----+------+-----------+----+----+----+----+------ +(0 rows) + diff --git a/tsl/test/shared/expected/dist_distinct-16.out b/tsl/test/shared/expected/dist_distinct-16.out new file mode 100644 index 00000000000..dd2fbedc49e --- /dev/null +++ b/tsl/test/shared/expected/dist_distinct-16.out @@ -0,0 +1,949 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set TEST_BASE_NAME dist_distinct +-- Run +SELECT format('include/%s_run.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME", + format('%s/shared/results/%s_results_reference.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_REFERENCE", + format('%s/shared/results/%s_results_distributed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_DIST" +\gset +SELECT format('\! 
diff -u --label "Distributed results" --label "Local results" %s %s', :'TEST_RESULTS_DIST', :'TEST_RESULTS_REFERENCE') AS "DIFF_CMD_DIST" +\gset +\set PREFIX 'EXPLAIN (verbose, costs off)' +\set ORDER_BY_1 'ORDER BY 1' +\set ORDER_BY_1_2 'ORDER BY 1,2' +\set ECHO queries +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: metrics_dist +%%% PREFIX: EXPLAIN (verbose, costs off) +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +SET enable_hashagg TO false; +Unique plan on access node for SELECT DISTINCT +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id +FROM metrics_dist +ORDER BY 1 +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist.device_id + -> Unique + Output: metrics_dist.device_id + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id + -> Merge Append + Sort Key: metrics_dist_1.device_id + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST +(23 rows) + +RESET enable_hashagg; +SET timescaledb.enable_per_data_node_queries = true; +SELECT DISTINCT on expressions is not pushed down +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id*v1 +FROM metrics_dist +ORDER BY 1 +LIMIT 10; +QUERY PLAN + Limit + Output: ((metrics_dist.device_id * metrics_dist.v1)) + -> Unique + Output: ((metrics_dist.device_id * metrics_dist.v1)) + -> Custom Scan (AsyncAppend) + Output: ((metrics_dist.device_id * metrics_dist.v1)) + -> Merge Append + Sort Key: ((metrics_dist_1.device_id * metrics_dist_1.v1)) + -> Result + Output: (metrics_dist_1.device_id * metrics_dist_1.v1) + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id, metrics_dist_1.v1 + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT device_id, v1 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY (device_id * v1) ASC NULLS LAST + -> Result + Output: (metrics_dist_2.device_id * metrics_dist_2.v1) + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id, metrics_dist_2.v1 + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT device_id, v1 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, 
ARRAY[..]) ORDER BY (device_id * v1) ASC NULLS LAST + -> Result + Output: (metrics_dist_3.device_id * metrics_dist_3.v1) + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id, metrics_dist_3.v1 + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT device_id, v1 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY (device_id * v1) ASC NULLS LAST +(29 rows) + +SET timescaledb.enable_remote_explain = ON; +SELECT DISTINCT on column with index uses SkipScan +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id +FROM metrics_dist +ORDER BY 1 +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist.device_id + -> Unique + Output: metrics_dist.device_id + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id + -> Merge Append + Sort Key: metrics_dist_1.device_id + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk.device_id + -> Merge Append + Sort Key: _dist_hyper_X_X_chunk.device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk.device_id + -> Merge Append + Sort Key: _dist_hyper_X_X_chunk.device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using 
_dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk.device_id + -> Merge Append + Sort Key: _dist_hyper_X_X_chunk.device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + +(86 rows) + +SELECT DISTINCT with constants and NULLs in targetlist uses SkipScan +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id, NULL, 'const1' +FROM metrics_dist +ORDER BY 1 +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist.device_id, NULL::text, 'const1'::text + -> Unique + Output: metrics_dist.device_id, NULL::text, 'const1'::text + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id, NULL::text, 'const1'::text + -> Merge Append + Sort Key: metrics_dist_1.device_id + -> Result + Output: metrics_dist_1.device_id, NULL::text, 'const1'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk.device_id + -> Merge Append + Sort Key: _dist_hyper_X_X_chunk.device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > 
NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Result + Output: metrics_dist_2.device_id, NULL::text, 'const1'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk.device_id + -> Merge Append + Sort Key: _dist_hyper_X_X_chunk.device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Result + Output: metrics_dist_3.device_id, NULL::text, 'const1'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk.device_id + -> Merge Append + Sort Key: _dist_hyper_X_X_chunk.device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: 
_dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + +(92 rows) + +SELECT DISTINCT only sends columns to the data nodes +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id, time, NULL, 'const1' +FROM metrics_dist +ORDER BY 1,2 +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist.device_id, metrics_dist."time", NULL::text, 'const1'::text + -> Unique + Output: metrics_dist.device_id, metrics_dist."time", NULL::text, 'const1'::text + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id, metrics_dist."time", NULL::text, 'const1'::text + -> Merge Append + Sort Key: metrics_dist_1.device_id, metrics_dist_1."time" + -> Result + Output: metrics_dist_1.device_id, metrics_dist_1."time", NULL::text, 'const1'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id, metrics_dist_1."time" + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Result + Output: metrics_dist_2.device_id, metrics_dist_2."time", NULL::text, 'const1'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id, metrics_dist_2."time" + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> HashAggregate + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Group Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Result + Output: 
metrics_dist_3.device_id, metrics_dist_3."time", NULL::text, 'const1'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id, metrics_dist_3."time" + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + +(72 rows) + +SELECT DISTINCE is pushed down in attribute attno order +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id, time +FROM metrics_dist +ORDER BY 1,2 +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist.device_id, metrics_dist."time" + -> Unique + Output: metrics_dist.device_id, metrics_dist."time" + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id, metrics_dist."time" + -> Merge Append + Sort Key: metrics_dist_1.device_id, metrics_dist_1."time" + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id, metrics_dist_1."time" + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id, metrics_dist_2."time" + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> HashAggregate + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Group Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq 
Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id, metrics_dist_3."time" + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + +(66 rows) + +SELECT DISTINCT ON multiple columns is pushed to data nodes +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device_id, time) device_id, time +FROM metrics_dist +ORDER BY 1,2 +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist.device_id, metrics_dist."time" + -> Unique + Output: metrics_dist.device_id, metrics_dist."time" + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id, metrics_dist."time" + -> Merge Append + Sort Key: metrics_dist_1.device_id, metrics_dist_1."time" + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id, metrics_dist_1."time" + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id, "time") "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id, metrics_dist_2."time" + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id, "time") "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS 
LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id, metrics_dist_3."time" + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id, "time") "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + +(65 rows) + +SELECT DISTINCT within a sub-select +EXPLAIN (verbose, costs off) +SELECT device_id, time, 'const1' FROM (SELECT DISTINCT ON (device_id) device_id, time +FROM metrics_dist +ORDER BY 1,2 +LIMIT 10) a; +QUERY PLAN + Subquery Scan on a + Output: a.device_id, a."time", 'const1'::text + -> Limit + Output: metrics_dist.device_id, metrics_dist."time" + -> Unique + Output: metrics_dist.device_id, metrics_dist."time" + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id, metrics_dist."time" + -> Merge Append + Sort Key: metrics_dist_1.device_id, metrics_dist_1."time" + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id, metrics_dist_1."time" + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id) "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Custom Scan 
(DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id, metrics_dist_2."time" + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id) "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id, metrics_dist_3."time" + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id) "time", device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Sort + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + Sort Key: _dist_hyper_X_X_chunk.device_id, _dist_hyper_X_X_chunk."time" + -> Append + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + -> Seq Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk."time", _dist_hyper_X_X_chunk.device_id + +(67 rows) + +SET timescaledb.enable_per_data_node_queries = false; +SELECT DISTINCT works with enable_per_data_node_queries disabled +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id +FROM metrics_dist +ORDER BY 1 +LIMIT 10; +QUERY PLAN + Limit + Output: _dist_hyper_X_X_chunk.device_id + -> Unique + Output: _dist_hyper_X_X_chunk.device_id + -> Merge Append + Sort Key: _dist_hyper_X_X_chunk.device_id + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_1 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_2 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom 
Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_3 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_1 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_2 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_3 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_1 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_2 + Remote SQL: SELECT DISTINCT device_id FROM 
_timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + + -> Foreign Scan on _timescaledb_internal._dist_hyper_X_X_chunk + Output: _dist_hyper_X_X_chunk.device_id + Data node: data_node_3 + Remote SQL: SELECT DISTINCT device_id FROM _timescaledb_internal._dist_hyper_X_X_chunk ORDER BY device_id ASC NULLS LAST + Remote EXPLAIN: + Unique + Output: device_id + -> Custom Scan (SkipScan) on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + -> Index Only Scan using _dist_hyper_X_X_chunk_metrics_dist_device_id_time_idx on _timescaledb_internal._dist_hyper_X_X_chunk + Output: device_id + Index Cond: (_dist_hyper_X_X_chunk.device_id > NULL::integer) + +(123 rows) + +SET timescaledb.enable_per_data_node_queries = true; +SET timescaledb.enable_remote_explain = OFF; +SELECT DISTINCT should not have duplicate columns +EXPLAIN (verbose, costs off) +SELECT DISTINCT device_id, device_id +FROM metrics_dist +ORDER BY 1; +QUERY PLAN + Unique + Output: metrics_dist.device_id, metrics_dist.device_id + -> Custom Scan (AsyncAppend) + Output: metrics_dist.device_id, metrics_dist.device_id + -> Merge Append + Sort Key: metrics_dist_1.device_id + -> Result + Output: metrics_dist_1.device_id, metrics_dist_1.device_id + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.device_id + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + -> Result + Output: metrics_dist_2.device_id, metrics_dist_2.device_id + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.device_id + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST + -> Result + Output: metrics_dist_3.device_id, metrics_dist_3.device_id + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.device_id + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT device_id FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST +(27 rows) + +SELECT DISTINCT handles whole row correctly +EXPLAIN (verbose, costs off) +SELECT DISTINCT * +FROM metrics_dist +ORDER BY 1,2 +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist."time", metrics_dist.device_id, metrics_dist.v0, metrics_dist.v1, metrics_dist.v2, metrics_dist.v3 + -> Unique + Output: metrics_dist."time", metrics_dist.device_id, metrics_dist.v0, metrics_dist.v1, metrics_dist.v2, metrics_dist.v3 + -> Custom Scan (AsyncAppend) + Output: metrics_dist."time", metrics_dist.device_id, metrics_dist.v0, metrics_dist.v1, metrics_dist.v2, metrics_dist.v3 + -> Merge Append + Sort Key: metrics_dist_1."time", metrics_dist_1.device_id, 
metrics_dist_1.v0, metrics_dist_1.v1, metrics_dist_1.v2, metrics_dist_1.v3 + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1."time", metrics_dist_1.device_id, metrics_dist_1.v0, metrics_dist_1.v1, metrics_dist_1.v2, metrics_dist_1.v3 + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY "time" ASC NULLS LAST, device_id ASC NULLS LAST, v0 ASC NULLS LAST, v1 ASC NULLS LAST, v2 ASC NULLS LAST, v3 ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2."time", metrics_dist_2.device_id, metrics_dist_2.v0, metrics_dist_2.v1, metrics_dist_2.v2, metrics_dist_2.v3 + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY "time" ASC NULLS LAST, device_id ASC NULLS LAST, v0 ASC NULLS LAST, v1 ASC NULLS LAST, v2 ASC NULLS LAST, v3 ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3."time", metrics_dist_3.device_id, metrics_dist_3.v0, metrics_dist_3.v1, metrics_dist_3.v2, metrics_dist_3.v3 + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY "time" ASC NULLS LAST, device_id ASC NULLS LAST, v0 ASC NULLS LAST, v1 ASC NULLS LAST, v2 ASC NULLS LAST, v3 ASC NULLS LAST +(23 rows) + +SELECT DISTINCT ON (expr) handles whole row correctly +EXPLAIN (verbose, costs off) +SELECT DISTINCT ON (device_id) * +FROM metrics_dist +ORDER BY device_id, time +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist."time", metrics_dist.device_id, metrics_dist.v0, metrics_dist.v1, metrics_dist.v2, metrics_dist.v3 + -> Unique + Output: metrics_dist."time", metrics_dist.device_id, metrics_dist.v0, metrics_dist.v1, metrics_dist.v2, metrics_dist.v3 + -> Custom Scan (AsyncAppend) + Output: metrics_dist."time", metrics_dist.device_id, metrics_dist.v0, metrics_dist.v1, metrics_dist.v2, metrics_dist.v3 + -> Merge Append + Sort Key: metrics_dist_1.device_id, metrics_dist_1."time" + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1."time", metrics_dist_1.device_id, metrics_dist_1.v0, metrics_dist_1.v1, metrics_dist_1.v2, metrics_dist_1.v3 + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id) "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2."time", metrics_dist_2.device_id, metrics_dist_2.v0, metrics_dist_2.v1, metrics_dist_2.v2, metrics_dist_2.v3 + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id) "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE 
_timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3."time", metrics_dist_3.device_id, metrics_dist_3.v0, metrics_dist_3.v1, metrics_dist_3.v2, metrics_dist_3.v3 + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT ON (device_id) "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY device_id ASC NULLS LAST, "time" ASC NULLS LAST +(23 rows) + +SELECT DISTINCT RECORD works correctly +SET enable_hashagg TO false; +EXPLAIN (verbose, costs off) +SELECT DISTINCT metrics_dist r +FROM metrics_dist +ORDER BY r +LIMIT 10; +QUERY PLAN + Limit + Output: metrics_dist.* + -> Unique + Output: metrics_dist.* + -> Sort + Output: metrics_dist.* + Sort Key: metrics_dist.* + -> Custom Scan (AsyncAppend) + Output: metrics_dist.* + -> Append + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1.* + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2.* + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3.* + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT DISTINCT "time", device_id, v0, v1, v2, v3 FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) +(25 rows) + +RESET enable_hashagg; +SELECT DISTINCT FUNCTION_EXPR not pushed down currently +EXPLAIN (verbose, costs off) +SELECT DISTINCT time_bucket('1h',time) col1 +FROM metrics_dist +ORDER BY col1 +LIMIT 10; +QUERY PLAN + Limit + Output: (time_bucket('@ 1 hour'::interval, metrics_dist."time")) + -> Unique + Output: (time_bucket('@ 1 hour'::interval, metrics_dist."time")) + -> Custom Scan (AsyncAppend) + Output: (time_bucket('@ 1 hour'::interval, metrics_dist."time")) + -> Merge Append + Sort Key: (time_bucket('@ 1 hour'::interval, metrics_dist_1."time")) + -> Result + Output: time_bucket('@ 1 hour'::interval, metrics_dist_1."time") + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Output: metrics_dist_1."time" + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT "time" FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY public.time_bucket('01:00:00'::interval, "time") ASC NULLS LAST + -> Result + Output: time_bucket('@ 1 hour'::interval, metrics_dist_2."time") + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Output: metrics_dist_2."time" + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT "time" FROM public.metrics_dist WHERE 
_timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY public.time_bucket('01:00:00'::interval, "time") ASC NULLS LAST + -> Result + Output: time_bucket('@ 1 hour'::interval, metrics_dist_3."time") + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Output: metrics_dist_3."time" + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT "time" FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) ORDER BY public.time_bucket('01:00:00'::interval, "time") ASC NULLS LAST +(29 rows) + +SELECT DISTINCT without any var references is handled correctly +EXPLAIN (verbose, costs off) +SELECT DISTINCT 1, 'constx' +FROM metrics_dist; +QUERY PLAN + Limit + Output: 1, 'constx'::text + -> Custom Scan (AsyncAppend) + Output: 1, 'constx'::text + -> Append + -> Result + Output: 1, 'constx'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_1 + Data node: data_node_1 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT NULL FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) + -> Result + Output: 1, 'constx'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_2 + Data node: data_node_2 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT NULL FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) + -> Result + Output: 1, 'constx'::text + -> Custom Scan (DataNodeScan) on public.metrics_dist metrics_dist_3 + Data node: data_node_3 + Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk + Remote SQL: SELECT NULL FROM public.metrics_dist WHERE _timescaledb_functions.chunks_in(public.metrics_dist.*, ARRAY[..]) +(23 rows) + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: metrics +%%% PREFIX: +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% RUNNING TESTS on table: metrics_dist +%%% PREFIX: +%%% ORDER_BY_1: ORDER BY 1 +%%% ORDER_BY_1_2: ORDER BY 1,2 +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +-- diff distributed and reference results (should be exactly same) +:DIFF_CMD_DIST diff --git a/tsl/test/shared/expected/dist_fetcher_type-16.out b/tsl/test/shared/expected/dist_fetcher_type-16.out new file mode 100644 index 00000000000..cd18175d29f --- /dev/null +++ b/tsl/test/shared/expected/dist_fetcher_type-16.out @@ -0,0 +1,414 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP off +-- Test that we use the correct type of remote data fetcher. +set timescaledb.remote_data_fetcher = 'auto'; +select 1 x from distinct_on_distributed t1, distinct_on_distributed t2 +where t1.id = t2.id + 1 +limit 1; + x + 1 +(1 row) + +-- This query should choose COPY fetcher. 
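A brief aside on what drives the fetcher choice (grounded in the errors exercised later in this file): the COPY fetcher requires a remote scan that is not parameterized, does not need to be rescanned, and returns only column types with binary serialization, so the self-join above has to use a cursor while the standalone scan below can use COPY. A minimal, illustrative way to inspect the decision, reusing the distinct_on_distributed table from these tests:

-- illustrative sketch, not part of the generated expected output;
-- the chosen fetcher shows up as the "Fetcher Type:" line of the analyzed plan
set timescaledb.remote_data_fetcher = 'auto';
explain (analyze, verbose, costs off, timing off, summary off)
select id from distinct_on_distributed limit 1;
reset timescaledb.remote_data_fetcher;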
+select 1 x from distinct_on_distributed t1 +limit 1; + x + 1 +(1 row) + +explain (analyze, verbose, costs off, timing off, summary off) +select 1 x from distinct_on_distributed t1 +limit 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + Output: 1 + -> Result (actual rows=1 loops=1) + Output: 1 + -> Custom Scan (DataNodeScan) on public.distinct_on_distributed t1 (actual rows=1 loops=1) + Data node: data_node_1 + Fetcher Type: COPY + Chunks: _dist_hyper_X_X_chunk + Remote SQL: SELECT NULL FROM public.distinct_on_distributed WHERE _timescaledb_functions.chunks_in(public.distinct_on_distributed.*, ARRAY[..]) LIMIT 1 +(9 rows) + +set timescaledb.remote_data_fetcher = 'cursor'; +select 1 x from distinct_on_distributed t1, distinct_on_distributed t2 +where t1.id = t2.id +limit 1; + x + 1 +(1 row) + +explain (analyze, verbose, costs off, timing off, summary off) +select 1 x from distinct_on_distributed t1, distinct_on_distributed t2 +where t1.id = t2.id +limit 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + Output: 1 + -> Nested Loop (actual rows=1 loops=1) + Output: 1 + Join Filter: (t1.id = t2.id) + -> Custom Scan (DataNodeScan) on public.distinct_on_distributed t1 (actual rows=1 loops=1) + Output: t1.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_X_X_chunk + Remote SQL: SELECT id FROM public.distinct_on_distributed WHERE _timescaledb_functions.chunks_in(public.distinct_on_distributed.*, ARRAY[..]) + -> Materialize (actual rows=1 loops=1) + Output: t2.id + -> Custom Scan (DataNodeScan) on public.distinct_on_distributed t2 (actual rows=1 loops=1) + Output: t2.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_X_X_chunk + Remote SQL: SELECT id FROM public.distinct_on_distributed WHERE _timescaledb_functions.chunks_in(public.distinct_on_distributed.*, ARRAY[..]) +(19 rows) + +-- This query can't work with copy or prepared fetcher. +set timescaledb.remote_data_fetcher = 'copy'; +select 1 x from distinct_on_distributed t1, distinct_on_distributed t2 +where t1.id = t2.id + 1 +limit 1; +ERROR: only cursor fetcher is supported for this query +set timescaledb.remote_data_fetcher = 'prepared'; +select 1 x from distinct_on_distributed t1, distinct_on_distributed t2 +where t1.id = t2.id + 1 +limit 1; +ERROR: only cursor fetcher is supported for this query +-- Check once again that 'auto' is used after 'copy'. +set timescaledb.remote_data_fetcher = 'auto'; +select 1 x from distinct_on_distributed t1, distinct_on_distributed t2 +where t1.id = t2.id + 1 +limit 1; + x + 1 +(1 row) + +reset timescaledb.remote_data_fetcher; +-- #3786 test for assertion failure in cursor_fetcher_rewind +SET jit TO off; +SELECT * +FROM devices AS d +WHERE + EXISTS( + SELECT 1 + FROM metrics_dist AS m, + LATERAL( + SELECT 1 + FROM insert_test it + WHERE + EXISTS( + SELECT 1 + FROM dist_chunk_copy AS ref_2 + WHERE + it.id IS NOT NULL AND + EXISTS(SELECT d.name AS c0 FROM metrics_int WHERE NULL::TIMESTAMP <= m.time) + ) + ) AS l + WHERE d.name ~~ d.name + ) +ORDER BY 1, 2; + device_id | name +-----------+------ +(0 rows) + +RESET jit; +-- COPY fetcher should fail on a custom type that has no binary +-- serialization. 
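For context on the error exercised next: PostgreSQL's binary COPY protocol relies on each column type having send/receive functions, and the custom type used by disttable_with_ct presumably defines only text input/output, so the COPY fetcher has no binary representation to transfer. A quick, illustrative way to list base types that lack binary serialization (not part of the test itself):

-- illustrative sketch: base types with no binary send function cannot be
-- transferred with binary COPY, hence not by the COPY fetcher either
SELECT typname
FROM pg_type
WHERE typtype = 'b' AND typsend::oid = 0
ORDER BY typname;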
+set timescaledb.remote_data_fetcher = 'copy'; +explain (analyze, verbose, costs off, timing off, summary off) +select time, txn_id, val, substring(info for 20) from disttable_with_ct; +ERROR: cannot use COPY fetcher because some of the column types do not have binary serialization +-- Cursor fetcher should be chosen automatically if we have a data type with no +-- binary serialization. +set timescaledb.remote_data_fetcher = 'auto'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from disttable_with_ct; +QUERY PLAN + Custom Scan (DataNodeScan) on public.disttable_with_ct (actual rows=2 loops=1) + Output: disttable_with_ct."time", disttable_with_ct.txn_id, disttable_with_ct.val, disttable_with_ct.info + Data node: data_node_2 + Fetcher Type: Cursor + Chunks: _dist_hyper_X_X_chunk + Remote SQL: SELECT "time", txn_id, val, info FROM public.disttable_with_ct WHERE _timescaledb_functions.chunks_in(public.disttable_with_ct.*, ARRAY[..]) +(6 rows) + +-- COPY fetcher with bytea data +set timescaledb.remote_data_fetcher = 'copy'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from disttable_with_bytea; +QUERY PLAN + Custom Scan (DataNodeScan) on public.disttable_with_bytea (actual rows=2 loops=1) + Output: disttable_with_bytea."time", disttable_with_bytea.bdata + Data node: data_node_3 + Fetcher Type: COPY + Chunks: _dist_hyper_X_X_chunk + Remote SQL: SELECT "time", bdata FROM public.disttable_with_bytea WHERE _timescaledb_functions.chunks_in(public.disttable_with_bytea.*, ARRAY[..]) +(6 rows) + +select * from disttable_with_bytea; + time | bdata +------+------- + 1001 | \x + 1001 | +(2 rows) + +-- Cursor fetcher with bytea data +set timescaledb.remote_data_fetcher = 'cursor'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from disttable_with_bytea; +QUERY PLAN + Custom Scan (DataNodeScan) on public.disttable_with_bytea (actual rows=2 loops=1) + Output: disttable_with_bytea."time", disttable_with_bytea.bdata + Data node: data_node_3 + Fetcher Type: Cursor + Chunks: _dist_hyper_X_X_chunk + Remote SQL: SELECT "time", bdata FROM public.disttable_with_bytea WHERE _timescaledb_functions.chunks_in(public.disttable_with_bytea.*, ARRAY[..]) +(6 rows) + +select * from disttable_with_bytea; + time | bdata +------+------- + 1001 | \x + 1001 | +(2 rows) + +-- Prepared statement fetcher with bytea data +set timescaledb.remote_data_fetcher = 'prepared'; +explain (analyze, verbose, costs off, timing off, summary off) +select * from disttable_with_bytea; +QUERY PLAN + Custom Scan (DataNodeScan) on public.disttable_with_bytea (actual rows=2 loops=1) + Output: disttable_with_bytea."time", disttable_with_bytea.bdata + Data node: data_node_3 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_X_X_chunk + Remote SQL: SELECT "time", bdata FROM public.disttable_with_bytea WHERE _timescaledb_functions.chunks_in(public.disttable_with_bytea.*, ARRAY[..]) +(6 rows) + +select * from disttable_with_bytea; + time | bdata +------+------- + 1001 | \x + 1001 | +(2 rows) + +-- #4515 test for assertion failure in copy_fetcher_close +SET timescaledb.remote_data_fetcher = 'copy'; +SELECT * +FROM + conditions ref_0 +WHERE EXISTS ( + SELECT FROM + distinct_on_distributed, + LATERAL ( + SELECT * + FROM pg_class, + LATERAL ( + SELECT ref_0.device FROM pg_class WHERE false LIMIT 1) as lat_1 + ) as lat_2 + WHERE (SELECT 1 FROM pg_class LIMIT 1) >= ref_0.device +); + time | device | value +------+--------+------- +(0 rows) + +SET 
timescaledb.remote_data_fetcher = 'prepared'; +SELECT * +FROM + conditions ref_0 +WHERE EXISTS ( + SELECT FROM + distinct_on_distributed, + LATERAL ( + SELECT * + FROM pg_class, + LATERAL ( + SELECT ref_0.device FROM pg_class WHERE false LIMIT 1) as lat_1 + ) as lat_2 + WHERE (SELECT 1 FROM pg_class LIMIT 1) >= ref_0.device +); + time | device | value +------+--------+------- +(0 rows) + +SET timescaledb.remote_data_fetcher = 'cursor'; +SELECT * +FROM + conditions ref_0 +WHERE EXISTS ( + SELECT FROM + distinct_on_distributed, + LATERAL ( + SELECT * + FROM pg_class, + LATERAL ( + SELECT ref_0.device FROM pg_class WHERE false LIMIT 1) as lat_1 + ) as lat_2 + WHERE (SELECT 1 FROM pg_class LIMIT 1) >= ref_0.device +); + time | device | value +------+--------+------- +(0 rows) + +-- #4518 +-- test error handling for queries with multiple distributed hypertables +SET timescaledb.remote_data_fetcher = 'copy'; +SELECT * FROM + conditions_dist1 ref_0 +WHERE EXISTS ( + SELECT FROM + distinct_on_distributed as ref_1, + LATERAL (select * from metrics as ref_2) as subq_3 + WHERE + (SELECT device_id FROM metrics_compressed limit 1 offset 3) >= ref_0.device +) +ORDER BY 1, 2; +ERROR: only cursor fetcher is supported for this query +SET timescaledb.remote_data_fetcher = 'prepared'; +SELECT * FROM + conditions_dist1 ref_0 +WHERE EXISTS ( + SELECT FROM + distinct_on_distributed as ref_1, + LATERAL (select * from metrics as ref_2) as subq_3 + WHERE + (SELECT device_id FROM metrics_compressed limit 1 offset 3) >= ref_0.device +) +ORDER BY 1, 2; +ERROR: only cursor fetcher is supported for this query +SET timescaledb.remote_data_fetcher = 'auto'; +SELECT * FROM + conditions_dist1 ref_0 +WHERE EXISTS ( + SELECT FROM + distinct_on_distributed as ref_1, + LATERAL (select * from metrics as ref_2) as subq_3 + WHERE + (SELECT device_id FROM metrics_compressed limit 1 offset 3) >= ref_0.device +) +ORDER BY 1, 2; + time | device | value +------------------------------+--------+------- + Sun Jan 01 06:01:00 2017 PST | 1 | 1.2 + Sun Jan 01 08:01:00 2017 PST | 1 | 7.3 +(2 rows) + +-- Check that we don't use COPY fetcher for parameterized plans. +CREATE TABLE lookup (id SERIAL NOT NULL, key TEXT, val TEXT); +CREATE TABLE metric (ts TIMESTAMPTZ NOT NULL, val FLOAT8 NOT NULL, lookup_id INT NOT NULL); +SELECT 1 FROM create_distributed_hypertable('metric', 'ts'); + ?column? 
+ 1 +(1 row) + +INSERT INTO lookup (key, val) VALUES ('host', 'localhost'); +INSERT INTO metric (ts, val, lookup_id) SELECT s.*, 3.14+1, 1 +FROM generate_series('2021-08-17 00:00:00'::timestamp, '2021-08-17 00:59:59'::timestamp, '1 s'::interval) s; +SELECT + m.ts, + m.val +FROM metric m +WHERE + ARRAY[m.lookup_id] && (SELECT array_agg(l.id)::int[] FROM lookup l WHERE l.key = 'host' AND l.val = 'localhost') + AND m.ts BETWEEN '2021-08-17 00:00:00' AND '2021-08-17 01:00:00' +ORDER BY 1 DESC LIMIT 1; + ts | val +------------------------------+------ + Tue Aug 17 00:59:59 2021 PDT | 4.14 +(1 row) + +SELECT + m.ts, + m.val +FROM metric m +WHERE + m.lookup_id = ANY((SELECT array_agg(l.id) FROM lookup l WHERE l.key = 'host' AND l.val = 'localhost')::int[]) + AND m.ts BETWEEN '2021-08-17 00:00:00' AND '2021-08-17 01:00:00' +ORDER BY 1 DESC LIMIT 1; + ts | val +------------------------------+------ + Tue Aug 17 00:59:59 2021 PDT | 4.14 +(1 row) + +SET timescaledb.remote_data_fetcher = 'copy'; +SELECT + m.ts, + m.val +FROM metric m +WHERE + m.lookup_id = ANY((SELECT array_agg(l.id) FROM lookup l WHERE l.key = 'host' AND l.val = 'localhost')::int[]) + AND m.ts BETWEEN '2021-08-17 00:00:00' AND '2021-08-17 01:00:00' +ORDER BY 1 DESC LIMIT 1; +ERROR: cannot use COPY fetcher because the plan is parameterized +-- Test fetcher when query is aborted before EOF due to LIMIT +SET timescaledb.remote_data_fetcher = 'copy'; +SELECT * FROM metrics_dist ORDER BY time, device_id LIMIT 11; + time | device_id | v0 | v1 | v2 | v3 +------------------------------+-----------+----+----+-----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 3 | 1.5 | + Fri Dec 31 16:00:00 1999 PST | 2 | 3 | 4 | 2.5 | + Fri Dec 31 16:00:00 1999 PST | 3 | 4 | 5 | 3.5 | + Fri Dec 31 16:00:00 1999 PST | 4 | 5 | 6 | 4.5 | + Fri Dec 31 16:00:00 1999 PST | 5 | 6 | 7 | 5.5 | + Fri Dec 31 16:02:00 1999 PST | 1 | 2 | 3 | 1.5 | + Fri Dec 31 16:02:00 1999 PST | 2 | 3 | 4 | 2.5 | + Fri Dec 31 16:02:00 1999 PST | 3 | 4 | 5 | 3.5 | + Fri Dec 31 16:02:00 1999 PST | 4 | 5 | 6 | 4.5 | + Fri Dec 31 16:02:00 1999 PST | 5 | 6 | 7 | 5.5 | + Fri Dec 31 16:04:00 1999 PST | 1 | 2 | 3 | 1.5 | +(11 rows) + +SET timescaledb.remote_data_fetcher = 'prepared'; +SELECT * FROM metrics_dist ORDER BY time, device_id LIMIT 11; + time | device_id | v0 | v1 | v2 | v3 +------------------------------+-----------+----+----+-----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 3 | 1.5 | + Fri Dec 31 16:00:00 1999 PST | 2 | 3 | 4 | 2.5 | + Fri Dec 31 16:00:00 1999 PST | 3 | 4 | 5 | 3.5 | + Fri Dec 31 16:00:00 1999 PST | 4 | 5 | 6 | 4.5 | + Fri Dec 31 16:00:00 1999 PST | 5 | 6 | 7 | 5.5 | + Fri Dec 31 16:02:00 1999 PST | 1 | 2 | 3 | 1.5 | + Fri Dec 31 16:02:00 1999 PST | 2 | 3 | 4 | 2.5 | + Fri Dec 31 16:02:00 1999 PST | 3 | 4 | 5 | 3.5 | + Fri Dec 31 16:02:00 1999 PST | 4 | 5 | 6 | 4.5 | + Fri Dec 31 16:02:00 1999 PST | 5 | 6 | 7 | 5.5 | + Fri Dec 31 16:04:00 1999 PST | 1 | 2 | 3 | 1.5 | +(11 rows) + +SET timescaledb.remote_data_fetcher = 'cursor'; +SELECT * FROM metrics_dist ORDER BY time, device_id LIMIT 11; + time | device_id | v0 | v1 | v2 | v3 +------------------------------+-----------+----+----+-----+---- + Fri Dec 31 16:00:00 1999 PST | 1 | 2 | 3 | 1.5 | + Fri Dec 31 16:00:00 1999 PST | 2 | 3 | 4 | 2.5 | + Fri Dec 31 16:00:00 1999 PST | 3 | 4 | 5 | 3.5 | + Fri Dec 31 16:00:00 1999 PST | 4 | 5 | 6 | 4.5 | + Fri Dec 31 16:00:00 1999 PST | 5 | 6 | 7 | 5.5 | + Fri Dec 31 16:02:00 1999 PST | 1 | 2 | 3 | 1.5 | + Fri Dec 31 16:02:00 1999 PST | 2 | 3 | 4 | 2.5 | + Fri Dec 31 16:02:00 
1999 PST | 3 | 4 | 5 | 3.5 | + Fri Dec 31 16:02:00 1999 PST | 4 | 5 | 6 | 4.5 | + Fri Dec 31 16:02:00 1999 PST | 5 | 6 | 7 | 5.5 | + Fri Dec 31 16:04:00 1999 PST | 1 | 2 | 3 | 1.5 | +(11 rows) + +-- Verify that cursor fetcher can be rewind before EOF due to an +-- intermediate JOIN product reaching LIMIT +SET timescaledb.remote_data_fetcher = 'cursor'; +SELECT * FROM metrics_dist as m +WHERE EXISTS + (SELECT * + FROM (SELECT m.time as t + FROM metrics_dist a INNER JOIN devices b + ON a.device_id = b.device_id + LIMIT 50) as subq + WHERE subq.t is NULL) +LIMIT 1; + time | device_id | v0 | v1 | v2 | v3 +------+-----------+----+----+----+---- +(0 rows) + diff --git a/tsl/test/shared/expected/generated_columns-16.out b/tsl/test/shared/expected/generated_columns-16.out new file mode 100644 index 00000000000..fdc32307ecb --- /dev/null +++ b/tsl/test/shared/expected/generated_columns-16.out @@ -0,0 +1,62 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE TABLE gencol_tab ( + a INT NOT NULL, + b INT, + c INT GENERATED ALWAYS AS (a + b) STORED +); +SELECT table_name FROM create_hypertable('gencol_tab', 'a', chunk_time_interval=>10); + table_name + gencol_tab +(1 row) + +INSERT INTO gencol_tab(a, b) VALUES(1, 2); +INSERT INTO gencol_tab(a, b) VALUES(2, 3); +-- Ensure generated column cannot be updated +\set ON_ERROR_STOP 0 +INSERT INTO gencol_tab VALUES(3, 5, 8); +ERROR: cannot insert a non-DEFAULT value into column "c" +\set ON_ERROR_STOP 1 +SELECT * FROM gencol_tab ORDER BY a; + a | b | c +---+---+--- + 1 | 2 | 3 + 2 | 3 | 5 +(2 rows) + +DROP TABLE gencol_tab; +-- Ensure that generated column cannot be used for partitioning +-- Generated as expression +CREATE TABLE gencol_test ( + a INT NOT NULL, + b INT GENERATED ALWAYS AS (a + 123) STORED +); +\set ON_ERROR_STOP 0 +SELECT table_name FROM create_hypertable('gencol_test', 'a', 'b', 2, chunk_time_interval=>10); +ERROR: invalid partitioning column +\set ON_ERROR_STOP 1 +-- check if default generated expression can be dropped (works on >= PG13) +SELECT table_name FROM create_hypertable('gencol_test', 'a', chunk_time_interval=>10); + table_name + gencol_test +(1 row) + +SELECT attname, atthasdef, attidentity, attgenerated, attnotnull +FROM pg_attribute where attname = 'b' and attrelid = 'gencol_test'::regclass; + attname | atthasdef | attidentity | attgenerated | attnotnull +---------+-----------+-------------+--------------+------------ + b | t | | s | f +(1 row) + +\set ON_ERROR_STOP 0 +ALTER TABLE gencol_test ALTER COLUMN b DROP EXPRESSION; +\set ON_ERROR_STOP 1 +SELECT attname, atthasdef, attidentity, attgenerated, attnotnull +FROM pg_attribute where attname = 'b' and attrelid = 'gencol_test'::regclass; + attname | atthasdef | attidentity | attgenerated | attnotnull +---------+-----------+-------------+--------------+------------ + b | f | | | f +(1 row) + +DROP TABLE gencol_test; diff --git a/tsl/test/shared/expected/ordered_append-16.out b/tsl/test/shared/expected/ordered_append-16.out new file mode 100644 index 00000000000..8b4b303f3e0 --- /dev/null +++ b/tsl/test/shared/expected/ordered_append-16.out @@ -0,0 +1,4474 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
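The ordered_append expected output that follows checks TimescaleDB's ordered-append optimization: when a query orders by the hypertable's time dimension (typically with a LIMIT), the planner can emit a ChunkAppend node that visits chunks in time order and stops early, instead of appending all chunks and sorting. A minimal sketch of the query shape being verified, assuming the metrics hypertable used throughout these shared tests:

-- illustrative sketch, not part of the expected output: with ordered append the
-- plan contains "Custom Scan (ChunkAppend)" with an "Order:" line, and chunks
-- beyond the LIMIT stay "(never executed)"
EXPLAIN (costs off)
SELECT time FROM metrics ORDER BY time DESC LIMIT 1;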
+SELECT + format('include/%s.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/shared/results/%s_results_uncompressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNCOMPRESSED", + format('%s/shared/results/%s_results_compressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_COMPRESSED" +\gset +SELECT format('\! diff -u --label "Uncompressed results" --label "Compressed results" %s %s', :'TEST_RESULTS_UNCOMPRESSED', :'TEST_RESULTS_COMPRESSED') as "DIFF_CMD" +\gset +-- get EXPLAIN output for all variations +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_VERBOSE 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)' +set work_mem to '64MB'; +set max_parallel_workers_per_gather to 0; +\set TEST_TABLE 'metrics' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. +SET timescaledb.enable_decompression_sorted_merge = 0; +-- test ASC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(9 rows) + +-- test DESC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(9 rows) + +-- test query with ORDER BY column not in targetlist +:PREFIX +SELECT pg_typeof(device_id), + pg_typeof(v2) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) +(7 rows) + +-- ORDER BY may include other columns after time column +:PREFIX +SELECT time, + device_id, + v0 +FROM 
:TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) +(9 rows) + +-- test RECORD in targetlist +:PREFIX +SELECT (time, + device_id, + v0) +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) +(10 rows) + +-- test sort column not in targetlist +:PREFIX +SELECT time_bucket('1h', time) +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(10 rows) + +-- queries with ORDER BY non-time column shouldn't use ordered append +:PREFIX +SELECT device_id +FROM :TEST_TABLE +ORDER BY device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(9 rows) + +-- time column must be primary sort order +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY device_id, + time +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Incremental Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id, _hyper_X_X_chunk."time" + Presorted Key: _hyper_X_X_chunk.device_id + Full-sort Groups: 1 Sort Method: top-N heapsort + Pre-sorted Groups: 1 Sort Method: top-N heapsort + -> Merge Append (actual rows=13675 loops=1) + Sort Key: _hyper_X_X_chunk.device_id + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=3599 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 3599 + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on 
_hyper_X_X_chunk (actual rows=5039 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 5039 + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=5039 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 5039 +(17 rows) + +-- test equality constraint on ORDER BY prefix +-- currently not optimized +SET enable_seqscan TO false; +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=10 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 10 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 1 +(12 rows) + +RESET enable_seqscan; +-- queries without LIMIT should use ordered append +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time ASC; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics (actual rows=27348 loops=1) + Order: metrics."time" + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=7196 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 10794 + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=10076 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 15114 + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=10076 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 15114 +(11 rows) + +-- queries without ORDER BY shouldnt use ordered append +:PREFIX +SELECT pg_typeof(time) +FROM :TEST_TABLE +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Seq Scan on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) +(6 rows) + +-- test interaction with constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 +(9 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 
00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 +(9 rows) + +-- test interaction with runtime exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + Chunks excluded during startup: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 +(10 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + Chunks excluded during startup: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 +(10 rows) + +-- test constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz + AND time < '2000-01-10' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + Chunks excluded during startup: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" > ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 +(7 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz + AND time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + Chunks excluded during startup: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" < ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 +(7 rows) + +-- Disable hash aggregation to get a deterministic test output +SET enable_hashagg = OFF; +-- min/max queries +:PREFIX +SELECT max(time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on 
_hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(14 rows) + +:PREFIX +SELECT min(time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(14 rows) + +-- test first/last (doesn't use ordered append yet) +:PREFIX +SELECT first(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(15 rows) + +:PREFIX +SELECT last(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(15 rows) + +-- test query with time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(10 rows) + +-- test query with ORDER BY time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + 
Order: time_bucket('@ 1 day'::interval, metrics."time") + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(10 rows) + +-- test query with ORDER BY time_bucket, device_id +-- must not use ordered append +:PREFIX +SELECT time_bucket('1d', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time_bucket('1d', time), + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=7196 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=10076 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=10076 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) +(12 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY date_trunc('day', time) +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics."time") + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(10 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT date_trunc('day', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics."time") + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(10 rows) + +-- test query with ORDER BY date_trunc, device_id +-- must not use ordered append +:PREFIX +SELECT date_trunc('day', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY 1, + 2 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=7196 loops=1) + 
Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=10076 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=10076 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) +(12 rows) + +-- test query with now() should result in ordered ChunkAppend +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < now() + '1 month' +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + Chunks excluded during startup: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 +(13 rows) + +-- test CTE +:PREFIX WITH i AS ( + SELECT time + FROM :TEST_TABLE + WHERE time < now() + ORDER BY time DESC + LIMIT 100 +) +SELECT * +FROM i; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=100 loops=1) + Order: metrics."time" DESC + Chunks excluded during startup: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=100 loops=1) + Index Cond: ("time" < now()) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap Fetches: 0 +(13 rows) + +-- test CTE +-- no chunk exclusion for CTE because cte query is not pulled up +:PREFIX WITH cte AS ( + SELECT time + FROM :TEST_TABLE + WHERE device_id = 1 + ORDER BY time +) +SELECT * +FROM cte +WHERE time < '2000-02-01'::timestamptz; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics (actual rows=13674 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=3598 loops=1) + Index Cond: ((device_id = 1) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 3598 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: ((device_id = 1) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: ((device_id = 1) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 +(11 rows) + +-- test subquery +-- not ChunkAppend so no chunk exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time = ( + SELECT max(time) + FROM :TEST_TABLE) +ORDER BY time; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics (actual rows=5 loops=1) + Chunks excluded during runtime: 2 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit 
(actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1 loops=1) + Order: metrics_1."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=5 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 5 +(26 rows) + +-- test ordered append with limit expression +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT ( + SELECT length('four')); +QUERY PLAN + Limit (actual rows=4 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=4 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=4 loops=1) + Heap Fetches: 4 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(11 rows) + +-- test with ordered guc disabled +SET timescaledb.enable_ordered_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Merge Append (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=3 loops=1) + Heap Fetches: 3 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(9 rows) + +RESET timescaledb.enable_ordered_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=3 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=3 loops=1) + Heap Fetches: 3 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(9 rows) + +-- test with chunk append disabled +SET timescaledb.enable_chunk_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Merge Append (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=3 loops=1) + Heap Fetches: 3 + -> Index Only 
Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(9 rows) + +RESET timescaledb.enable_chunk_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=3 loops=1) + Order: metrics."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=3 loops=1) + Heap Fetches: 3 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(9 rows) + +\set TEST_TABLE 'metrics_space' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. +SET timescaledb.enable_decompression_sorted_merge = 0; +-- test ASC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(27 rows) + +-- test DESC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append 
(actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(27 rows) + +-- test query with ORDER BY column not in targetlist +:PREFIX +SELECT pg_typeof(device_id), + pg_typeof(v2) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) +(19 rows) + +-- ORDER BY may include other columns after time column +:PREFIX +SELECT time, + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Filter: (device_id = 1) + -> Index Scan Backward using 
_hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Filter: (device_id = 1) +(9 rows) + +-- test RECORD in targetlist +:PREFIX +SELECT (time, + device_id, + v0) +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Filter: (device_id = 1) +(10 rows) + +-- test sort column not in targetlist +:PREFIX +SELECT time_bucket('1h', time) +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(28 rows) + +-- queries with ORDER BY non-time column shouldn't use ordered append +:PREFIX +SELECT device_id +FROM :TEST_TABLE +ORDER BY device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on 
_hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(21 rows) + +-- time column must be primary sort order +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY device_id, + time +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id, _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + Heap Fetches: 1 +(21 rows) + +-- test equality constraint on ORDER BY prefix +-- currently not optimized +SET enable_seqscan TO false; +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=10 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) +(9 rows) + +RESET enable_seqscan; +-- queries without LIMIT should use ordered append +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time ASC; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics_space (actual rows=27348 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=7196 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by 
Filter: 7196 + -> Merge Append (actual rows=10076 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 10076 + -> Merge Append (actual rows=10076 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 10076 +(23 rows) + +-- queries without ORDER BY shouldnt use ordered append +:PREFIX +SELECT pg_typeof(time) +FROM :TEST_TABLE +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Seq Scan on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) + -> Seq Scan on _hyper_X_X_chunk (never executed) +(12 rows) + +-- test interaction with constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 +(25 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge 
Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 +(25 rows) + +-- test interaction with runtime exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > 
('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 +(36 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Heap Fetches: 0 +(36 rows) + +-- test constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz + AND time < '2000-01-10' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: (("time" > ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: (("time" > ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 0 + -> Index Only Scan using 
_hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: (("time" > ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 0 + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" > ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" > ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" > ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 +(25 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz + AND time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" < ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" < ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (("time" < ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (("time" < ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (("time" < ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (("time" < ('2000-01-08'::cstring)::timestamp with time zone) AND ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 0 +(25 rows) + +-- Disable hash aggregation to get a deterministic test output +SET enable_hashagg = OFF; +-- min/max queries +:PREFIX +SELECT max(time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan 
(ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(38 rows) + +:PREFIX +SELECT min(time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + 
Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(38 rows) + +-- test first/last (doesn't use ordered append yet) +:PREFIX +SELECT first(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(39 rows) + +:PREFIX +SELECT last(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + 
-> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(39 rows) + +-- test query with time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(28 rows) + +-- test query with ORDER BY time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: time_bucket('@ 1 day'::interval, metrics_space."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on 
_hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(28 rows) + +-- test query with ORDER BY time_bucket, device_id +-- must not use ordered append +:PREFIX +SELECT time_bucket('1d', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time_bucket('1d', time), + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Seq Scan on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=3598 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) +(18 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY date_trunc('day', time) +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_space."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never 
executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(28 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT date_trunc('day', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_space."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(28 rows) + +-- test query with ORDER BY date_trunc, device_id +-- must not use ordered append +:PREFIX +SELECT date_trunc('day', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY 1, + 2 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Seq Scan on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=3598 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: (device_id = ANY ('{1,2}'::integer[])) +(18 rows) + +-- test query with now() should result in ordered ChunkAppend +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < now() + '1 month' +ORDER BY time DESC 
+LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < (now() + '@ 1 mon'::interval)) + Heap Fetches: 0 +(36 rows) + +-- test CTE +:PREFIX WITH i AS ( + SELECT time + FROM :TEST_TABLE + WHERE time < now() + ORDER BY time DESC + LIMIT 100 +) +SELECT * +FROM i; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=100 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=100 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=21 loops=1) + Index Cond: ("time" < now()) + Heap Fetches: 21 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=60 loops=1) + Index Cond: ("time" < now()) + Heap Fetches: 60 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=21 loops=1) + Index Cond: ("time" < now()) + Heap Fetches: 21 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap 
Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" < now()) + Heap Fetches: 0 +(36 rows) + +-- test CTE +-- no chunk exclusion for CTE because cte query is not pulled up +:PREFIX WITH cte AS ( + SELECT time + FROM :TEST_TABLE + WHERE device_id = 1 + ORDER BY time +) +SELECT * +FROM cte +WHERE time < '2000-02-01'::timestamptz; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics_space (actual rows=13674 loops=1) + Order: metrics_space."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=3598 loops=1) + Index Cond: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=5038 loops=1) + Index Cond: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) +(11 rows) + +-- test subquery +-- not ChunkAppend so no chunk exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time = ( + SELECT max(time) + FROM :TEST_TABLE) +ORDER BY time; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics_space (actual rows=5 loops=1) + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space metrics_space_1 (actual rows=1 loops=1) + Order: metrics_space_1."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk_1."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk_1."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk_1."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on 
_hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk _hyper_X_X_chunk_1 (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=1) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 0 + -> Merge Append (actual rows=5 loops=1) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=3 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 3 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" = $1) + Heap Fetches: 1 +(70 rows) + +-- test ordered append with limit expression +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT ( + SELECT length('four')); +QUERY PLAN + Limit (actual rows=4 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=4 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=4 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=3 loops=1) + Heap Fetches: 3 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk 
(never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(29 rows) + +-- test with ordered guc disabled +SET timescaledb.enable_ordered_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Merge Append (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(21 rows) + +RESET timescaledb.enable_ordered_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=3 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(27 rows) + +-- test with chunk append disabled +SET timescaledb.enable_chunk_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Merge Append (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx 
on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 +(21 rows) + +RESET timescaledb.enable_chunk_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=3 loops=1) + Order: metrics_space."time" + -> Merge Append (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(27 rows) + +\set TEST_TABLE 'metrics_compressed' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. 
+SET timescaledb.enable_decompression_sorted_merge = 0; +-- test ASC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +-- test DESC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(11 rows) + +-- test query with ORDER BY column not in targetlist +:PREFIX +SELECT pg_typeof(device_id), + pg_typeof(v2) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(12 rows) + +-- ORDER BY may include other columns after time column +:PREFIX +SELECT time, + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: metrics_compressed."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) +(12 rows) + +-- test RECORD in targetlist +:PREFIX +SELECT (time, + device_id, + v0) +FROM 
:TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: metrics_compressed."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) +(13 rows) + +-- test sort column not in targetlist +:PREFIX +SELECT time_bucket('1h', time) +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(12 rows) + +-- queries with ORDER BY non-time column shouldn't use ordered append +:PREFIX +SELECT device_id +FROM :TEST_TABLE +ORDER BY device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(18 rows) + +-- time column must be primary sort order +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY device_id, + time +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id, _hyper_X_X_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> 
Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(24 rows) + +-- test equality constraint on ORDER BY prefix +-- currently not optimized +SET enable_seqscan TO false; +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) +(12 rows) + +RESET enable_seqscan; +-- queries without LIMIT should use ordered append +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time ASC; +QUERY PLAN + Sort (actual rows=27348 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: quicksort + -> Append (actual rows=27348 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(16 rows) + +-- queries without ORDER BY shouldnt use ordered append +:PREFIX +SELECT pg_typeof(time) +FROM :TEST_TABLE +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(9 rows) + 
+-- test interaction with constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=45575 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=20385 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 4615 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=25 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Append (actual rows=45575 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=20385 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 4615 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=25 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 5 +(15 rows) + +-- test interaction with runtime exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=41975 loops=1) + Chunks excluded during startup: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=16785 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 3215 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 10 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) +(16 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=26390 loops=1) + Chunks excluded during startup: 1 + -> Custom Scan 
(DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=8400 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1790 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=15 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 15 +(16 rows) + +-- test constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz + AND time < '2000-01-10' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=7195 loops=1) + Chunks excluded during startup: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7195 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 7805 + Vectorized Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=15 loops=1) + Filter: ((_ts_meta_min_1 < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 15 +(13 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz + AND time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=3595 loops=1) + Chunks excluded during startup: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3595 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 6405 + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=10 loops=1) + Filter: ((_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 20 +(13 rows) + +-- Disable hash aggregation to get a deterministic test output +SET enable_hashagg = OFF; +-- min/max queries +:PREFIX +SELECT max(time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +:PREFIX +SELECT min(time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual 
rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +-- test first/last (doesn't use ordered append yet) +:PREFIX +SELECT first(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +:PREFIX +SELECT last(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +-- test query with time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(12 rows) + +-- test query with ORDER BY time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on 
_hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(12 rows) + +-- test query with ORDER BY time_bucket, device_id +-- must not use ordered append +:PREFIX +SELECT time_bucket('1d', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time_bucket('1d', time), + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(18 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY date_trunc('day', time) +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(12 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT date_trunc('day', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(12 rows) + +-- test query with ORDER BY date_trunc, device_id +-- must not use ordered append +:PREFIX +SELECT date_trunc('day', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY 1, + 2 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), 
_hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(18 rows) + +-- test query with now() should result in ordered ChunkAppend +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < now() + '1 month' +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_compressed."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=68370 loops=1) + Chunks excluded during startup: 0 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(15 rows) + +-- test CTE +:PREFIX WITH i AS ( + SELECT time + FROM :TEST_TABLE + WHERE time < now() + ORDER BY time DESC + LIMIT 100 +) +SELECT * +FROM i; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Sort (actual rows=100 loops=1) + Sort Key: metrics_compressed."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=68370 loops=1) + Chunks excluded during startup: 0 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(15 rows) + +-- test CTE +-- no chunk exclusion for CTE because cte query is not pulled up +:PREFIX WITH cte AS ( + SELECT time + FROM :TEST_TABLE + WHERE device_id = 1 + ORDER BY time +) +SELECT * +FROM cte +WHERE time < '2000-02-01'::timestamptz; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics_compressed (actual rows=13674 loops=1) + Order: metrics_compressed."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) + 
Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) +(17 rows) + +-- test subquery +-- not ChunkAppend so no chunk exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time = ( + SELECT max(time) + FROM :TEST_TABLE) +ORDER BY time; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics_compressed (actual rows=5 loops=1) + Chunks excluded during runtime: 2 + InitPlan 1 (returns $0) + -> Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=20 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=30 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5 loops=1) + Filter: ("time" = $0) + Rows Removed by Filter: 4995 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 25 +(28 rows) + +-- test ordered append with limit expression +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT ( + SELECT length('four')); +QUERY PLAN + Limit (actual rows=4 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on 
_hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(13 rows) + +-- test with ordered guc disabled +SET timescaledb.enable_ordered_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +RESET timescaledb.enable_ordered_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +-- test with chunk append disabled +SET timescaledb.enable_chunk_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +RESET timescaledb.enable_chunk_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(11 rows) + +\set TEST_TABLE 'metrics_space_compressed' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. 
By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. +SET timescaledb.enable_decompression_sorted_merge = 0; +-- test ASC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(23 rows) + +-- test DESC for ordered chunks +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on 
compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) +(23 rows) + +-- test query with ORDER BY column not in targetlist +:PREFIX +SELECT pg_typeof(device_id), + pg_typeof(v2) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(24 rows) + +-- ORDER BY may include other columns after time column +:PREFIX +SELECT time, + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: metrics_space_compressed."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: (device_id = 1) +(19 rows) + +-- test RECORD in targetlist +:PREFIX +SELECT (time, + device_id, + v0) +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC, + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: metrics_space_compressed."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 
loops=1) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: (device_id = 1) +(20 rows) + +-- test sort column not in targetlist +:PREFIX +SELECT time_bucket('1h', time) +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) +(24 rows) + +-- queries with ORDER BY non-time column shouldn't use ordered append +:PREFIX +SELECT device_id +FROM :TEST_TABLE +ORDER BY device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: 
compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(48 rows) + +-- time column must be primary sort order +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY device_id, + time +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk.device_id, _hyper_X_X_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan 
(DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk._ts_meta_sequence_num DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(42 rows) + +-- test equality constraint on ORDER BY prefix +-- currently not optimized +SET enable_seqscan TO false; +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY device_id, + time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Append (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) +(12 rows) + +RESET enable_seqscan; +-- queries without LIMIT should use ordered append +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time ASC; +QUERY PLAN + Sort (actual rows=27348 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: quicksort + -> Append (actual rows=27348 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(25 rows) + +-- queries without ORDER BY shouldnt use ordered append +:PREFIX +SELECT pg_typeof(time) +FROM :TEST_TABLE +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq 
Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(21 rows) + +-- test interaction with constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=45575 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=4077 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 923 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=12231 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 2769 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=15 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 3 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=4077 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 923 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) +(35 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-07' +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit 
(actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Append (actual rows=45575 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=4077 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 923 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=12231 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 2769 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=15 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 3 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=4077 loops=1) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 923 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 1 +(35 rows) + +-- test interaction with runtime exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_space_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=41975 loops=1) + -> Merge Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 4 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on 
compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 4 + -> Merge Append (actual rows=16785 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3357 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 643 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 2 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10071 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1929 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3357 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 643 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 2 + -> Merge Append (actual rows=25190 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone) +(53 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_space_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=26390 loops=1) + -> Merge Append (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + -> Merge Append (actual 
rows=8400 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1680 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 358 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 3 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5040 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1074 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=9 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 9 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1680 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 358 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 3 + -> Merge Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 6 +(53 rows) + +-- test constraint exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time > '2000-01-08'::text::timestamptz + AND time < '2000-01-10' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_space_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=7195 loops=1) + -> Merge Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 4 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 12 + 
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 4 + -> Merge Append (actual rows=7195 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1439 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1561 + Vectorized Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=3 loops=1) + Filter: ((_ts_meta_min_1 < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 3 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=4317 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 4683 + Vectorized Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=9 loops=1) + Filter: ((_ts_meta_min_1 < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 9 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1439 loops=1) + Filter: ("time" > ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1561 + Vectorized Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=3 loops=1) + Filter: ((_ts_meta_min_1 < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_max_1 > ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 3 +(46 rows) + +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < '2000-01-08'::text::timestamptz + AND time > '2000-01-07' +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_space_compressed."time" + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=3595 loops=1) + -> Merge Append (actual rows=3595 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=719 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1281 + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=1) + Filter: ((_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 4 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=2157 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 3843 + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: ((_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < 
('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=719 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Rows Removed by Filter: 1281 + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=1) + Filter: ((_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 4 + -> Merge Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" < ('2000-01-08'::cstring)::timestamp with time zone) + Vectorized Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_max_1 > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) AND (_ts_meta_min_1 < ('2000-01-08'::cstring)::timestamp with time zone)) + Rows Removed by Filter: 6 +(46 rows) + +-- Disable hash aggregation to get a deterministic test output +SET enable_hashagg = OFF; +-- min/max queries +:PREFIX +SELECT max(time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual 
rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(29 rows) + +:PREFIX +SELECT min(time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(29 rows) + +-- test first/last (doesn't use ordered append yet) +:PREFIX +SELECT first(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 
loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(29 rows) + +:PREFIX +SELECT last(time, time) +FROM :TEST_TABLE; +QUERY PLAN + Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(29 rows) + +-- test query with time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY time ASC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on 
_hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(24 rows) + +-- test query with ORDER BY time_bucket +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(24 rows) + +-- test query with ORDER BY time_bucket, device_id +-- must not use ordered append +:PREFIX +SELECT time_bucket('1d', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY time_bucket('1d', time), + device_id +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 
loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(27 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT time_bucket('1d', time) +FROM :TEST_TABLE +ORDER BY date_trunc('day', time) +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(24 rows) + +-- test query with ORDER BY date_trunc +:PREFIX +SELECT date_trunc('day', time) +FROM :TEST_TABLE +ORDER BY 1 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk 
(actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(24 rows) + +-- test query with ORDER BY date_trunc, device_id +-- must not use ordered append +:PREFIX +SELECT date_trunc('day', time), + device_id, + v0 +FROM :TEST_TABLE +WHERE device_id IN (1, 2) +ORDER BY 1, + 2 +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=27348 loops=1) + -> Append (actual rows=27348 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(27 rows) + +-- test query with now() should result in ordered ChunkAppend +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time < now() + '1 month' +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: metrics_space_compressed."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=68370 loops=1) + -> Merge Append (actual rows=25190 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 
loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Merge Append (actual rows=25190 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Merge Append (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: ("time" < (now() + '@ 1 mon'::interval)) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) +(35 rows) + +-- test CTE +:PREFIX WITH i AS ( + SELECT time + FROM :TEST_TABLE + WHERE time < now() + ORDER BY time DESC + LIMIT 100 +) +SELECT * +FROM i; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Sort (actual rows=100 loops=1) + Sort Key: metrics_space_compressed."time" DESC + Sort Method: top-N heapsort + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=68370 loops=1) + -> Merge Append (actual rows=25190 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Merge Append (actual rows=25190 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Merge Append (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Filter: ("time" < now()) 
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) +(35 rows) + +-- test CTE +-- no chunk exclusion for CTE because cte query is not pulled up +:PREFIX WITH cte AS ( + SELECT time + FROM :TEST_TABLE + WHERE device_id = 1 + ORDER BY time +) +SELECT * +FROM cte +WHERE time < '2000-02-01'::timestamptz; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=13674 loops=1) + Order: metrics_space_compressed."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) +(17 rows) + +-- test subquery +-- not ChunkAppend so no chunk exclusion +:PREFIX +SELECT time +FROM :TEST_TABLE +WHERE time = ( + SELECT max(time) + FROM :TEST_TABLE) +ORDER BY time; +QUERY PLAN + Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=5 loops=1) + InitPlan 1 (returns $0) + -> Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=12 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk 
compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Merge Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 4 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 4 + -> Merge Append (actual rows=0 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ("time" = $0) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 6 + -> Merge Append (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + Filter: ("time" = $0) + Rows Removed by Filter: 999 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 5 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3 loops=1) + Filter: ("time" = $0) + Rows Removed by Filter: 2997 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=3 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 15 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + Filter: ("time" = $0) + Rows Removed by Filter: 999 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Filter: ((_ts_meta_min_1 <= $0) AND (_ts_meta_max_1 >= $0)) + Rows Removed by Filter: 5 +(82 rows) + +-- test ordered append with limit expression +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT ( + SELECT length('four')); +QUERY PLAN + Limit (actual rows=4 loops=1) + InitPlan 1 (returns $0) + -> 
Result (actual rows=1 loops=1) + -> Sort (actual rows=4 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(25 rows) + +-- test with ordered guc disabled +SET timescaledb.enable_ordered_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(23 rows) + +RESET timescaledb.enable_ordered_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk 
(actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(23 rows) + +-- test with chunk append disabled +SET timescaledb.enable_chunk_append TO OFF; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(23 rows) + +RESET timescaledb.enable_chunk_append; +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time +LIMIT 3; +QUERY PLAN + Limit (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_X_X_chunk."time" + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 
loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(23 rows) + +-- get results for all the queries +-- run queries on uncompressed hypertable and store result +\set PREFIX '' +\set PREFIX_VERBOSE '' +\set ECHO none diff --git a/tsl/test/shared/expected/ordered_append_join-16.out b/tsl/test/shared/expected/ordered_append_join-16.out new file mode 100644 index 00000000000..9be09321a9d --- /dev/null +++ b/tsl/test/shared/expected/ordered_append_join-16.out @@ -0,0 +1,3899 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +SET timescaledb.enable_now_constify TO FALSE; +SELECT + format('include/%s.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/shared/results/%s_results_uncompressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNCOMPRESSED", + format('%s/shared/results/%s_results_compressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_COMPRESSED" +\gset +SELECT format('\! diff -u --label "Uncompressed results" --label "Compressed results" %s %s', :'TEST_RESULTS_UNCOMPRESSED', :'TEST_RESULTS_COMPRESSED') as "DIFF_CMD" +\gset +-- get EXPLAIN output for all variations +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_VERBOSE 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)' +set work_mem to '64MB'; +set max_parallel_workers_per_gather to 0; +\set TEST_TABLE 'metrics' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. 
+SET timescaledb.enable_decompression_sorted_merge = 0; +-- test LATERAL with ordered append in the outer query +:PREFIX +SELECT time, + pg_typeof(l) +FROM :TEST_TABLE, + LATERAL ( + SELECT * + FROM ( + VALUES (1), + (2)) v) l +ORDER BY time DESC +LIMIT 2; +QUERY PLAN + Limit (actual rows=2 loops=1) + -> Nested Loop (actual rows=2 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Materialize (actual rows=2 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) +(12 rows) + +-- test LATERAL with ordered append in the lateral query +:PREFIX +SELECT time, + pg_typeof(v) +FROM ( + VALUES (1), + (2)) v, + LATERAL ( + SELECT * + FROM :TEST_TABLE + ORDER BY time DESC + LIMIT 2) l; +QUERY PLAN + Nested Loop (actual rows=4 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Materialize (actual rows=2 loops=2) + -> Subquery Scan on l (actual rows=2 loops=1) + -> Limit (actual rows=2 loops=1) + -> Result (actual rows=2 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=2 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(14 rows) + +-- test plan with best index is chosen +-- this should use device_id, time index +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 +(12 rows) + +-- test plan with best index is chosen +-- this should use time index +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(9 rows) + +-- test LATERAL with correlated query +-- only last chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE 
o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Result (actual rows=1 loops=3) + -> Custom Scan (ChunkAppend) on metrics o (actual rows=1 loops=3) + Order: o."time" DESC + Chunks excluded during startup: 0 + Chunks excluded during runtime: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_1 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_2 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_3 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 3 +(17 rows) + +-- test LATERAL with correlated query +-- only 2nd chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-10'::timestamptz, '2000-01-11', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=2 loops=1) + -> Function Scan on generate_series g (actual rows=2 loops=1) + -> Limit (actual rows=1 loops=2) + -> Result (actual rows=1 loops=2) + -> Custom Scan (ChunkAppend) on metrics o (actual rows=1 loops=2) + Order: o."time" + Chunks excluded during startup: 0 + Chunks excluded during runtime: 2 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_1 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_2 (actual rows=1 loops=2) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 2 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_3 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 +(17 rows) + +-- test startup and runtime exclusion together +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + AND o.time < now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Result (actual rows=1 loops=3) + -> Custom Scan (ChunkAppend) on metrics o (actual rows=1 loops=3) + Order: o."time" DESC + Chunks excluded during startup: 0 + Chunks excluded during runtime: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_1 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_2 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < 
(g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o_3 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 3 +(17 rows) + +-- test startup and runtime exclusion together +-- all chunks should be filtered +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + AND o.time > now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=0 loops=3) + -> Result (actual rows=0 loops=3) + -> Custom Scan (ChunkAppend) on metrics o (actual rows=0 loops=3) + Order: o."time" DESC + Chunks excluded during startup: 3 +(7 rows) + +-- test JOIN +-- no exclusion on joined table because quals are not propagated yet +-- With PG 14 on i368, this query uses a nested loop join. Disable the nested loop join to get the same query plan in all tests +SET enable_nestloop TO off; +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.time < '2000-02-01' + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time; +QUERY PLAN + Merge Join (actual rows=13674 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + Index Cond: ((device_id = 1) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 3598 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + Index Cond: ((device_id = 1) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + Index Cond: ((device_id = 1) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 + -> Materialize (actual rows=13674 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=13674 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=3598 loops=1) + Index Cond: ((device_id = 2) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 3598 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (actual rows=5038 loops=1) + Index Cond: ((device_id = 2) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (actual rows=5038 loops=1) + Index Cond: ((device_id = 2) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 +(25 rows) + +RESET enable_nestloop; +-- test JOIN +-- last chunk of o2 should not be executed +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT * + FROM :TEST_TABLE o2 + ORDER BY 
time) o2 ON o1.time = o2.time +WHERE o1.time < '2000-01-08' +ORDER BY o1.time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Join (actual rows=10 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=2 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o1_1 (actual rows=2 loops=1) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 2 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Materialize (actual rows=10 loops=1) + -> Result (actual rows=6 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=6 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o2_1 (actual rows=6 loops=1) + Heap Fetches: 6 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Heap Fetches: 0 +(21 rows) + +-- test join against max query +-- not ChunkAppend so no chunk exclusion +SET enable_hashjoin = FALSE; +SET enable_nestloop = FALSE; +SET enable_hashagg = FALSE; +:PREFIX +SELECT o1.time, + o2.* +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT max(time) AS max_time + FROM :TEST_TABLE) o2 ON o1.time = o2.max_time +WHERE o1.device_id = 1 +ORDER BY time; +QUERY PLAN + Merge Join (actual rows=1 loops=1) + Merge Cond: (o1."time" = ($0)) + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 3598 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 5038 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 5038 + -> Sort (actual rows=1 loops=1) + Sort Key: ($0) + Sort Method: quicksort + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Order: metrics."time" DESC + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(30 rows) + +RESET enable_hashjoin; +RESET enable_nestloop; +RESET enable_hashagg; +SET enable_seqscan TO false; +-- test JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge 
Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test JOIN on time column with USING +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 USING (time) +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test NATURAL JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + NATURAL INNER JOIN :TEST_TABLE o2 +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: o1."time" + Sort Method: quicksort + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(6 rows) + +-- test LEFT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + LEFT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND 
o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test RIGHT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + RIGHT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test JOIN on time column with ON clause expression order switched +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o2.time = o1.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 
loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test JOIN on time column with equality condition in WHERE clause +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON TRUE +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test JOIN on time column with ORDER BY 2nd hypertable +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 
100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test JOIN on time column and device_id +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id + AND o1.time = o2.time + ORDER BY o1.time + LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + Join Filter: (o1.device_id = o2.device_id) + Rows Removed by Join Filter: 400 + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o1_3 (never executed) + -> Materialize (actual rows=500 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=101 loops=1) + Order: o2."time" + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o2_1 (actual rows=101 loops=1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_time_idx on _hyper_X_X_chunk o2_3 (never executed) +(16 rows) + +-- test JOIN on device_id +-- should not use ordered append for 2nd hypertable +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id +WHERE o1.device_id = 1 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Nested Loop (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=1 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=1 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Append (actual rows=100 loops=1) + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: ((device_id = 1) AND (device_id = 1)) + Heap Fetches: 100 
+ -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: ((device_id = 1) AND (device_id = 1)) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: ((device_id = 1) AND (device_id = 1)) + Heap Fetches: 0 +(24 rows) + +-- test JOIN on time column with implicit join +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1, + :TEST_TABLE o2 +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(26 rows) + +-- test JOIN on time column with 3 hypertables +-- should use 3 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time + INNER JOIN :TEST_TABLE o3 ON o1.time = o3.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 + AND o3.device_id = 3 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o3."time" = o1."time") + -> Custom Scan (ChunkAppend) on metrics o3 (actual rows=100 loops=1) + Order: o3."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o3_1 (actual rows=100 loops=1) + Index Cond: (device_id = 3) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o3_2 (never executed) + Index Cond: (device_id = 3) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o3_3 (never executed) + Index Cond: (device_id = 3) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Index Cond: (device_id = 1) + Heap Fetches: 100 + -> Index Only Scan Backward using 
_hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Index Cond: (device_id = 1) + Heap Fetches: 0 + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(40 rows) + +RESET enable_seqscan; +\set TEST_TABLE 'metrics_space' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. +SET timescaledb.enable_decompression_sorted_merge = 0; +-- test LATERAL with ordered append in the outer query +:PREFIX +SELECT time, + pg_typeof(l) +FROM :TEST_TABLE, + LATERAL ( + SELECT * + FROM ( + VALUES (1), + (2)) v) l +ORDER BY time DESC +LIMIT 2; +QUERY PLAN + Limit (actual rows=2 loops=1) + -> Nested Loop (actual rows=2 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx 
on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Materialize (actual rows=2 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) +(30 rows) + +-- test LATERAL with ordered append in the lateral query +:PREFIX +SELECT time, + pg_typeof(v) +FROM ( + VALUES (1), + (2)) v, + LATERAL ( + SELECT * + FROM :TEST_TABLE + ORDER BY time DESC + LIMIT 2) l; +QUERY PLAN + Nested Loop (actual rows=4 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Materialize (actual rows=2 loops=2) + -> Subquery Scan on l (actual rows=2 loops=1) + -> Limit (actual rows=2 loops=1) + -> Result (actual rows=2 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=2 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=2 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(32 rows) + +-- test plan with best index is chosen +-- this should use device_id, time index +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Filter: (device_id = 1) +(9 rows) + +-- test plan with best index is chosen +-- this should use time index +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on 
_hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Heap Fetches: 0 +(27 rows) + +-- test LATERAL with correlated query +-- only last chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Result (actual rows=1 loops=3) + -> Custom Scan (ChunkAppend) on metrics_space o (actual rows=1 loops=3) + Order: o."time" DESC + -> Merge Append (actual rows=0 loops=3) + Sort Key: o_1."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_1 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_2 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_3 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=3) + Sort Key: o_4."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_4 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_5 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_6 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Merge Append (actual rows=1 loops=3) + Sort Key: o_7."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_7 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < 
(g."time" + '@ 1 day'::interval))) + Heap Fetches: 3 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_8 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 3 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_9 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 3 +(39 rows) + +-- test LATERAL with correlated query +-- only 2nd chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-10'::timestamptz, '2000-01-11', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=2 loops=1) + -> Function Scan on generate_series g (actual rows=2 loops=1) + -> Limit (actual rows=1 loops=2) + -> Result (actual rows=1 loops=2) + -> Custom Scan (ChunkAppend) on metrics_space o (actual rows=1 loops=2) + Order: o."time" + -> Merge Append (actual rows=0 loops=2) + Sort Key: o_1."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_1 (actual rows=0 loops=2) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_2 (actual rows=0 loops=2) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_3 (actual rows=0 loops=2) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Merge Append (actual rows=1 loops=2) + Sort Key: o_4."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_4 (actual rows=1 loops=2) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_5 (actual rows=1 loops=2) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_6 (actual rows=1 loops=2) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 2 + -> Merge Append (never executed) + Sort Key: o_7."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_7 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_8 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_9 (never executed) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Heap Fetches: 0 +(39 rows) + +-- test startup and runtime exclusion together +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + 
AND o.time < g.time + '1d'::interval + AND o.time < now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Result (actual rows=1 loops=3) + -> Custom Scan (ChunkAppend) on metrics_space o (actual rows=1 loops=3) + Order: o."time" DESC + -> Merge Append (actual rows=0 loops=3) + Sort Key: o_1."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_1 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_2 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_3 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=3) + Sort Key: o_4."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_4 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_5 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_6 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 0 + -> Merge Append (actual rows=1 loops=3) + Sort Key: o_7."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_7 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 3 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_8 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 3 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_9 (actual rows=1 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Heap Fetches: 3 +(39 rows) + +-- test startup and runtime exclusion together +-- all chunks should be filtered +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + AND o.time > now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=0 loops=3) + -> Result (actual rows=0 loops=3) + -> Custom Scan (ChunkAppend) on metrics_space o (actual rows=0 loops=3) + Order: o."time" DESC + -> Merge Append (actual 
rows=0 loops=3) + Sort Key: o_1."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_1 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_2 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_3 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=3) + Sort Key: o_4."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_4 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_5 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_6 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Merge Append (actual rows=0 loops=3) + Sort Key: o_7."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_7 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_8 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o_9 (actual rows=0 loops=3) + Index Cond: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Heap Fetches: 0 +(39 rows) + +-- test JOIN +-- no exclusion on joined table because quals are not propagated yet +-- With PG 14 on i368, this query uses a nested loop join. 
Disable the nested loop join to get the same query plan in all tests +SET enable_nestloop TO off; +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.time < '2000-02-01' + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time; +QUERY PLAN + Merge Join (actual rows=13674 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + Index Cond: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + Index Cond: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + Index Cond: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Materialize (actual rows=13674 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=13674 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=3598 loops=1) + Index Cond: ((device_id = 2) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 3598 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (actual rows=5038 loops=1) + Index Cond: ((device_id = 2) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (actual rows=5038 loops=1) + Index Cond: ((device_id = 2) AND ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone)) + Heap Fetches: 5038 +(25 rows) + +RESET enable_nestloop; +-- test JOIN +-- last chunk of o2 should not be executed +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT * + FROM :TEST_TABLE o2 + ORDER BY time) o2 ON o1.time = o2.time +WHERE o1.time < '2000-01-08' +ORDER BY o1.time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Join (actual rows=10 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=2 loops=1) + Order: o1."time" + -> Merge Append (actual rows=2 loops=1) + Sort Key: o1_1."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=2 loops=1) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (actual rows=1 loops=1) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (actual rows=1 loops=1) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: o1_4."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_4 (never executed) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap 
Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_5 (never executed) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_6 (never executed) + Index Cond: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Heap Fetches: 0 + -> Materialize (actual rows=10 loops=1) + -> Result (actual rows=6 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=6 loops=1) + Order: o2."time" + -> Merge Append (actual rows=6 loops=1) + Sort Key: o2_1."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_1 (actual rows=2 loops=1) + Heap Fetches: 2 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_2 (actual rows=4 loops=1) + Heap Fetches: 4 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_3 (actual rows=2 loops=1) + Heap Fetches: 2 + -> Merge Append (never executed) + Sort Key: o2_4."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_4 (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_5 (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_6 (never executed) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: o2_7."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_7 (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_8 (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_9 (never executed) + Heap Fetches: 0 +(55 rows) + +-- test join against max query +-- not ChunkAppend so no chunk exclusion +SET enable_hashjoin = FALSE; +SET enable_nestloop = FALSE; +SET enable_hashagg = FALSE; +:PREFIX +SELECT o1.time, + o2.* +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT max(time) AS max_time + FROM :TEST_TABLE) o2 ON o1.time = o2.max_time +WHERE o1.device_id = 1 +ORDER BY time; +QUERY PLAN + Merge Join (actual rows=1 loops=1) + Merge Cond: (o1."time" = ($0)) + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + Filter: (device_id = 1) + -> Sort (actual rows=1 loops=1) + Sort Key: ($0) + Sort Method: quicksort + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space (actual rows=1 loops=1) + Order: metrics_space."time" DESC + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual 
rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 1 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Merge Append (never executed) + Sort Key: _hyper_X_X_chunk."time" DESC + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 + -> Index Only Scan Backward using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Heap Fetches: 0 +(51 rows) + +RESET enable_hashjoin; +RESET enable_nestloop; +RESET enable_hashagg; +SET enable_seqscan TO false; +-- test JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test JOIN on time column with USING +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 USING (time) +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on 
_hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test NATURAL JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + NATURAL INNER JOIN :TEST_TABLE o2 +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: o1."time" + Sort Method: quicksort + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(6 rows) + +-- test LEFT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + LEFT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test RIGHT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + RIGHT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan 
using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test JOIN on time column with ON clause expression order switched +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o2.time = o1.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test JOIN on time column with equality condition in WHERE clause +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON TRUE +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using 
_hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test JOIN on time column with ORDER BY 2nd hypertable +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test JOIN on time column and device_id +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id + AND o1.time = o2.time + ORDER BY o1.time + LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + Join Filter: (o1.device_id = o2.device_id) + Rows Removed by Join Filter: 400 + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Merge Append (actual rows=100 loops=1) + Sort Key: o1_1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=21 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (actual rows=60 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (actual rows=21 loops=1) + -> Merge Append (never executed) + Sort Key: o1_4."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_4 (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_5 (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_6 (never executed) + -> Merge Append (never executed) + Sort Key: o1_7."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_7 (never executed) + -> Index Scan using 
_hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_8 (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_9 (never executed) + -> Materialize (actual rows=500 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=101 loops=1) + Order: o2."time" + -> Merge Append (actual rows=101 loops=1) + Sort Key: o2_1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_1 (actual rows=21 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_2 (actual rows=61 loops=1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_3 (actual rows=21 loops=1) + -> Merge Append (never executed) + Sort Key: o2_4."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_4 (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_5 (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_6 (never executed) + -> Merge Append (never executed) + Sort Key: o2_7."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_7 (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_8 (never executed) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o2_9 (never executed) +(40 rows) + +-- test JOIN on device_id +-- should not use ordered append for 2nd hypertable +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id +WHERE o1.device_id = 1 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Nested Loop (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=1 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=1 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Append (actual rows=100 loops=1) + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: ((device_id = 1) AND (device_id = 1)) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: ((device_id = 1) AND (device_id = 1)) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: ((device_id = 1) AND (device_id = 1)) + Heap Fetches: 0 +(21 rows) + +-- test JOIN on time column with implicit join +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1, + :TEST_TABLE o2 +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + 
Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(23 rows) + +-- test JOIN on time column with 3 hypertables +-- should use 3 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time + INNER JOIN :TEST_TABLE o3 ON o1.time = o3.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 + AND o3.device_id = 3 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o3."time" = o1."time") + -> Custom Scan (ChunkAppend) on metrics_space o3 (actual rows=100 loops=1) + Order: o3."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o3_1 (actual rows=100 loops=1) + Filter: (device_id = 3) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o3_2 (never executed) + Filter: (device_id = 3) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o3_3 (never executed) + Filter: (device_id = 3) + -> Materialize (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space o1 (actual rows=100 loops=1) + Order: o1."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_2 (never executed) + Filter: (device_id = 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_time_idx on _hyper_X_X_chunk o1_3 (never executed) + Filter: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space o2 (actual rows=100 loops=1) + Order: o2."time" + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + Index Cond: (device_id = 2) + Heap Fetches: 100 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_2 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 + -> Index Only Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk o2_3 (never executed) + Index Cond: (device_id = 2) + Heap Fetches: 0 +(34 rows) + +RESET enable_seqscan; +\set TEST_TABLE 'metrics_compressed' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. 
By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. +SET timescaledb.enable_decompression_sorted_merge = 0; +-- test LATERAL with ordered append in the outer query +:PREFIX +SELECT time, + pg_typeof(l) +FROM :TEST_TABLE, + LATERAL ( + SELECT * + FROM ( + VALUES (1), + (2)) v) l +ORDER BY time DESC +LIMIT 2; +QUERY PLAN + Limit (actual rows=2 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Nested Loop (actual rows=136740 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Materialize (actual rows=2 loops=68370) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) +(14 rows) + +-- test LATERAL with ordered append in the lateral query +:PREFIX +SELECT time, + pg_typeof(v) +FROM ( + VALUES (1), + (2)) v, + LATERAL ( + SELECT * + FROM :TEST_TABLE + ORDER BY time DESC + LIMIT 2) l; +QUERY PLAN + Nested Loop (actual rows=4 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Materialize (actual rows=2 loops=2) + -> Subquery Scan on l (actual rows=2 loops=1) + -> Limit (actual rows=2 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(16 rows) + +-- test plan with best index is chosen +-- this should use device_id, time index +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: metrics_compressed."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on 
compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) +(12 rows) + +-- test plan with best index is chosen +-- this should use time index +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(11 rows) + +-- test LATERAL with correlated query +-- only last chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Sort (actual rows=1 loops=3) + Sort Key: o."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=3600 loops=3) + -> Custom Scan (ChunkAppend) on metrics_compressed o (actual rows=3600 loops=3) + Chunks excluded during startup: 0 + Chunks excluded during runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_1 (never executed) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_2 (never executed) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_3 (actual rows=3600 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 4063 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 12 +(24 rows) + +-- test LATERAL with correlated query +-- only 2nd chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-10'::timestamptz, '2000-01-11', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=2 loops=1) + -> Function Scan on generate_series g (actual rows=2 loops=1) + -> Limit (actual rows=1 loops=2) + -> Sort (actual rows=1 loops=2) + Sort Key: o."time" + Sort Method: top-N heapsort + -> Result (actual rows=3600 loops=2) + -> Custom Scan (ChunkAppend) on metrics_compressed o (actual rows=3600 loops=2) + Chunks excluded during startup: 0 + Chunks excluded during runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk 
o_1 (never executed) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_2 (actual rows=3600 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 3900 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 22 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_3 (never executed) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) +(24 rows) + +-- test startup and runtime exclusion together +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + AND o.time < now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Sort (actual rows=1 loops=3) + Sort Key: o."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=3600 loops=3) + -> Custom Scan (ChunkAppend) on metrics_compressed o (actual rows=3600 loops=3) + Chunks excluded during startup: 0 + Chunks excluded during runtime: 2 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_1 (never executed) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_2 (never executed) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_3 (actual rows=3600 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Rows Removed by Filter: 4063 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 12 +(24 rows) + +-- test startup and runtime exclusion together +-- all chunks should be filtered +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + AND o.time > now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=0 loops=3) + -> Sort (actual rows=0 loops=3) + Sort Key: o."time" DESC + Sort Method: quicksort + -> Result (actual rows=0 loops=3) + -> Custom Scan (ChunkAppend) 
on metrics_compressed o (actual rows=0 loops=3) + Chunks excluded during startup: 3 +(9 rows) + +-- test JOIN +-- no exclusion on joined table because quals are not propagated yet +-- With PG 14 on i368, this query uses a nested loop join. Disable the nested loop join to get the same query plan in all tests +SET enable_nestloop TO off; +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.time < '2000-02-01' + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time; +QUERY PLAN + Merge Join (actual rows=13674 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize (actual rows=13674 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=13674 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + Index Cond: (device_id = 2) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + Index Cond: (device_id = 2) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + Index Cond: (device_id = 2) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 
2000 PST'::timestamp with time zone) +(37 rows) + +RESET enable_nestloop; +-- test JOIN +-- last chunk of o2 should not be executed +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT * + FROM :TEST_TABLE o2 + ORDER BY time) o2 ON o1.time = o2.time +WHERE o1.time < '2000-01-08' +ORDER BY o1.time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Join (actual rows=10 loops=1) + Merge Cond: (o1_1."time" = o2_1."time") + -> Sort (actual rows=2 loops=1) + Sort Key: o1_1."time" + Sort Method: quicksort + -> Append (actual rows=26390 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=17990 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=8400 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 1790 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=15 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 15 + -> Materialize (actual rows=10 loops=1) + -> Sort (actual rows=6 loops=1) + Sort Key: o2_1."time" + Sort Method: quicksort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) +(29 rows) + +-- test join against max query +-- not ChunkAppend so no chunk exclusion +SET enable_hashjoin = FALSE; +SET enable_nestloop = FALSE; +SET enable_hashagg = FALSE; +:PREFIX +SELECT o1.time, + o2.* +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT max(time) AS max_time + FROM :TEST_TABLE) o2 ON o1.time = o2.max_time +WHERE o1.device_id = 1 +ORDER BY time; +QUERY PLAN + Merge Join (actual rows=1 loops=1) + Merge Cond: (o1."time" = (max(_hyper_X_X_chunk."time"))) + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + -> Sort (actual rows=1 loops=1) + Sort Key: (max(_hyper_X_X_chunk."time")) + Sort Method: quicksort + -> Finalize Aggregate (actual rows=1 loops=1) + -> 
Append (actual rows=3 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=20 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=30 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=30 loops=1) +(27 rows) + +RESET enable_hashjoin; +RESET enable_nestloop; +RESET enable_hashagg; +SET enable_seqscan TO false; +-- test JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with USING +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 USING (time) +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual 
rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test NATURAL JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + NATURAL INNER JOIN :TEST_TABLE o2 +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: o1."time" + Sort Method: quicksort + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(6 rows) + +-- test LEFT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + LEFT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> 
Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test RIGHT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + RIGHT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with ON clause expression order switched +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o2.time = o1.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> 
Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with equality condition in WHERE clause +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON TRUE +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk 
compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with ORDER BY 2nd hypertable +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column and device_id +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id + AND o1.time = o2.time + ORDER BY o1.time + LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: ((o1_1."time" = o2_1."time") AND (o1_1.device_id = o2_1.device_id)) + -> Sort (actual rows=100 loops=1) + Sort Key: o1_1."time", o1_1.device_id + Sort Method: quicksort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=17990 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=25190 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on 
compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (actual rows=25190 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=30 loops=1) + -> Sort (actual rows=100 loops=1) + Sort Key: o2_1."time", o2_1.device_id + Sort Method: quicksort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=17990 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=20 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (actual rows=25190 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=30 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (actual rows=25190 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=30 loops=1) +(23 rows) + +-- test JOIN on device_id +-- should not use ordered append for 2nd hypertable +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id +WHERE o1.device_id = 1 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Nested Loop (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=1 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=1 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Append (actual rows=100 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 1) +(24 rows) + +-- test JOIN on time column with implicit join +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1, + :TEST_TABLE o2 +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 
loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with 3 hypertables +-- should use 3 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time + INNER JOIN :TEST_TABLE o3 ON o1.time = o3.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 + AND o3.device_id = 3 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o3."time" = o1."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o3 (actual rows=100 loops=1) + Order: o3."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o3_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_2 (actual rows=1 loops=1) + Index Cond: (device_id = 3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o3_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_2 (never executed) + Index Cond: (device_id = 3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o3_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_2 (never executed) + Index Cond: (device_id = 3) + -> Materialize (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom 
Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 2) +(40 rows) + +RESET enable_seqscan; +\set TEST_TABLE 'metrics_space_compressed' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- In the following test cases, we test that certain indexes are used. By using the +-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node +-- below the DecompressChunk node, which operates on the batches. This could lead to flaky +-- tests because the input data is small and PostgreSQL switches from IndexScans to +-- SequentialScans. Disable the optimization for the following tests to ensure we have +-- stable query plans in all CI environments. 
+SET timescaledb.enable_decompression_sorted_merge = 0; +-- test LATERAL with ordered append in the outer query +:PREFIX +SELECT time, + pg_typeof(l) +FROM :TEST_TABLE, + LATERAL ( + SELECT * + FROM ( + VALUES (1), + (2)) v) l +ORDER BY time DESC +LIMIT 2; +QUERY PLAN + Limit (actual rows=2 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Nested Loop (actual rows=136740 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Materialize (actual rows=2 loops=68370) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) +(26 rows) + +-- test LATERAL with ordered append in the lateral query +:PREFIX +SELECT time, + pg_typeof(v) +FROM ( + VALUES (1), + (2)) v, + LATERAL ( + SELECT * + FROM :TEST_TABLE + ORDER BY time DESC + LIMIT 2) l; +QUERY PLAN + Nested Loop (actual rows=4 loops=1) + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + -> Materialize (actual rows=2 loops=2) + -> Subquery Scan on l (actual rows=2 loops=1) + -> Limit (actual rows=2 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on 
compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) +(28 rows) + +-- test plan with best index is chosen +-- this should use device_id, time index +:PREFIX +SELECT time, + device_id +FROM :TEST_TABLE +WHERE device_id = 1 +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: metrics_space_compressed."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: compress_hyper_X_X_chunk._ts_meta_sequence_num + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + Filter: (device_id = 1) +(19 rows) + +-- test plan with best index is chosen +-- this should use time index +:PREFIX +SELECT time +FROM :TEST_TABLE +ORDER BY time DESC +LIMIT 1; +QUERY PLAN + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC + Sort Method: top-N heapsort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) +(23 rows) + +-- test LATERAL with correlated query +-- only last chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY 
PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Sort (actual rows=1 loops=3) + Sort Key: o."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=3600 loops=3) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o (actual rows=3600 loops=3) + -> Merge Append (actual rows=0 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_1 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_2 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_3 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Merge Append (actual rows=0 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_4 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_5 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_6 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Merge Append (actual rows=3600 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_7 (actual rows=720 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 813 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_8 (actual rows=2160 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2438 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 7 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_9 (actual rows=720 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 
day'::interval))) + Rows Removed by Filter: 813 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2 +(59 rows) + +-- test LATERAL with correlated query +-- only 2nd chunk should be executed +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-10'::timestamptz, '2000-01-11', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + ORDER BY time + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=2 loops=1) + -> Function Scan on generate_series g (actual rows=2 loops=1) + -> Limit (actual rows=1 loops=2) + -> Sort (actual rows=1 loops=2) + Sort Key: o."time" + Sort Method: top-N heapsort + -> Result (actual rows=3600 loops=2) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o (actual rows=3600 loops=2) + -> Merge Append (actual rows=0 loops=2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_1 (actual rows=0 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 4 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_2 (actual rows=0 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 12 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_3 (actual rows=0 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 4 + -> Merge Append (actual rows=3600 loops=2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_4 (actual rows=720 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 780 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 4 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_5 (actual rows=2160 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2340 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 14 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_6 (actual rows=720 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 780 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 4 + -> Merge Append (actual rows=0 loops=2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_7 (actual rows=0 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual 
rows=0 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_8 (actual rows=0 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_9 (actual rows=0 loops=2) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval))) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=2) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 +(59 rows) + +-- test startup and runtime exclusion together +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + AND o.time < now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=1 loops=3) + -> Sort (actual rows=1 loops=3) + Sort Key: o."time" DESC + Sort Method: top-N heapsort + -> Result (actual rows=3600 loops=3) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o (actual rows=3600 loops=3) + -> Merge Append (actual rows=0 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_1 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_2 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_3 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Merge Append (actual rows=0 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_4 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_5 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 
18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_6 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Merge Append (actual rows=3600 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_7 (actual rows=720 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Rows Removed by Filter: 813 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_8 (actual rows=2160 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Rows Removed by Filter: 2438 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 7 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_9 (actual rows=720 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" < now())) + Rows Removed by Filter: 813 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2 +(59 rows) + +-- test startup and runtime exclusion together +-- all chunks should be filtered +:PREFIX +SELECT g.time, + l.time +FROM generate_series('2000-01-01'::timestamptz, '2000-01-03', '1d') AS g (time) + LEFT OUTER JOIN LATERAL ( + SELECT * + FROM :TEST_TABLE o + WHERE o.time >= g.time + AND o.time < g.time + '1d'::interval + AND o.time > now() + ORDER BY time DESC + LIMIT 1) l ON TRUE; +QUERY PLAN + Nested Loop Left Join (actual rows=3 loops=1) + -> Function Scan on generate_series g (actual rows=3 loops=1) + -> Limit (actual rows=0 loops=3) + -> Sort (actual rows=0 loops=3) + Sort Key: o."time" DESC + Sort Method: quicksort + -> Result (actual rows=0 loops=3) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o (actual rows=0 loops=3) + -> Merge Append (actual rows=0 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_1 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_2 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_3 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 
day'::interval))) + Rows Removed by Filter: 6 + -> Merge Append (actual rows=0 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_4 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_5 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 18 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_6 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 6 + -> Merge Append (actual rows=0 loops=3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_7 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Rows Removed by Filter: 1533 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_8 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Rows Removed by Filter: 4598 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 7 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o_9 (actual rows=0 loops=3) + Filter: (("time" >= g."time") AND ("time" < (g."time" + '@ 1 day'::interval)) AND ("time" > now())) + Rows Removed by Filter: 1533 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=2 loops=3) + Filter: ((_ts_meta_max_1 >= g."time") AND (_ts_meta_min_1 < (g."time" + '@ 1 day'::interval))) + Rows Removed by Filter: 2 +(59 rows) + +-- test JOIN +-- no exclusion on joined table because quals are not propagated yet +-- With PG 14 on i386, this query uses a nested loop join.
Disable the nested loop join to get the same query plan in all tests +SET enable_nestloop TO off; +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.time < '2000-02-01' + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time; +QUERY PLAN + Merge Join (actual rows=13674 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize (actual rows=13674 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=13674 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 2) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 2) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (actual rows=5038 loops=1) + Vectorized Filter: ("time" < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 2) + Filter: (_ts_meta_min_1 < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) +(37 rows) + +RESET enable_nestloop; +-- test JOIN +-- last chunk of o2 should not be executed +:PREFIX +SELECT o1.time, + o2.time +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT * + FROM :TEST_TABLE o2 + ORDER BY time) o2 ON o1.time = o2.time +WHERE o1.time < '2000-01-08' 
+ORDER BY o1.time +LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Merge Join (actual rows=10 loops=1) + Merge Cond: (o1_1."time" = o2_1."time") + -> Sort (actual rows=2 loops=1) + Sort Key: o1_1."time" + Sort Method: quicksort + -> Append (actual rows=26390 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=10794 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (actual rows=3598 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_4 (actual rows=1680 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 358 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 3 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_5 (actual rows=5040 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 1074 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=9 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 9 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_6 (actual rows=1680 loops=1) + Vectorized Filter: ("time" < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 358 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=3 loops=1) + Filter: (_ts_meta_min_1 < 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 3 + -> Materialize (actual rows=10 loops=1) + -> Sort (actual rows=6 loops=1) + Sort Key: o2_1."time" + Sort Method: quicksort + -> Result (actual rows=68370 loops=1) + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_4 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_5 (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=18 loops=1) + -> Custom Scan 
(DecompressChunk) on _hyper_X_X_chunk o2_6 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_7 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_8 (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_9 (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(61 rows) + +-- test join against max query +-- not ChunkAppend so no chunk exclusion +SET enable_hashjoin = FALSE; +SET enable_nestloop = FALSE; +SET enable_hashagg = FALSE; +:PREFIX +SELECT o1.time, + o2.* +FROM :TEST_TABLE o1 + INNER JOIN ( + SELECT max(time) AS max_time + FROM :TEST_TABLE) o2 ON o1.time = o2.max_time +WHERE o1.device_id = 1 +ORDER BY time; +QUERY PLAN + Merge Join (actual rows=1 loops=1) + Merge Cond: (o1."time" = (max(_hyper_X_X_chunk."time"))) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=13674 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=5038 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (actual rows=5038 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Index Cond: (device_id = 1) + -> Sort (actual rows=1 loops=1) + Sort Key: (max(_hyper_X_X_chunk."time")) + Sort Method: quicksort + -> Finalize Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on 
compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Partial Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) +(45 rows) + +RESET enable_hashjoin; +RESET enable_nestloop; +RESET enable_hashagg; +SET enable_seqscan TO false; +-- test JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with USING +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 USING (time) +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using 
compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test NATURAL JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + NATURAL INNER JOIN :TEST_TABLE o2 +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: o1."time" + Sort Method: quicksort + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(6 rows) + +-- test LEFT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + LEFT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on 
compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test RIGHT JOIN on time column +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + RIGHT JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with ON clause expression order switched +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o2.time = o1.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never 
executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with equality condition in WHERE clause +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON TRUE +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with ORDER BY 2nd hypertable +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o2.time +LIMIT 100; +QUERY PLAN + 
Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column and device_id +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id + AND o1.time = o2.time + ORDER BY o1.time + LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: ((o1_1."time" = o2_1."time") AND (o1_1.device_id = o2_1.device_id)) + -> Sort (actual rows=100 loops=1) + Sort Key: o1_1."time", o1_1.device_id + Sort Method: quicksort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (actual rows=10794 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_4 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_5 (actual rows=15114 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=18 
loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_6 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_7 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_8 (actual rows=15114 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_9 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=6 loops=1) + -> Sort (actual rows=100 loops=1) + Sort Key: o2_1."time", o2_1.device_id + Sort Method: quicksort + -> Append (actual rows=68370 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (actual rows=10794 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=12 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_4 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_5 (actual rows=15114 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_6 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_7 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_8 (actual rows=15114 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=18 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_9 (actual rows=5038 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=6 loops=1) +(47 rows) + +-- test JOIN on device_id +-- should not use ordered append for 2nd hypertable +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id +WHERE o1.device_id = 1 +ORDER BY o1.time 
+LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Nested Loop (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=1 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=1 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Append (actual rows=100 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed) + Index Cond: (device_id = 1) +(24 rows) + +-- test JOIN on time column with implicit join +-- should use 2 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1, + :TEST_TABLE o2 +WHERE o1.time = o2.time + AND o1.device_id = 1 + AND o2.device_id = 2 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on 
_hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(26 rows) + +-- test JOIN on time column with 3 hypertables +-- should use 3 ChunkAppend +:PREFIX +SELECT o1.time +FROM :TEST_TABLE o1 + INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time + INNER JOIN :TEST_TABLE o3 ON o1.time = o3.time +WHERE o1.device_id = 1 + AND o2.device_id = 2 + AND o3.device_id = 3 +ORDER BY o1.time +LIMIT 100; +QUERY PLAN + Limit (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o3."time" = o1."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o3 (actual rows=100 loops=1) + Order: o3."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o3_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o3_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o3_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 3) + -> Materialize (actual rows=100 loops=1) + -> Merge Join (actual rows=100 loops=1) + Merge Cond: (o1."time" = o2."time") + -> Custom Scan (ChunkAppend) on metrics_space_compressed o1 (actual rows=100 loops=1) + Order: o1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o1_3 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 1) + -> Materialize (actual rows=100 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed o2 (actual rows=100 loops=1) + Order: o2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_1 (actual rows=100 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_2 (never executed) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk o2_3 (never executed) + -> Index Scan Backward using 
compress_hyper_X_X_chunk__compressed_hypertable_6_device_id__t on compress_hyper_X_X_chunk (never executed) + Index Cond: (device_id = 2) +(40 rows) + +RESET enable_seqscan; +-- Disable plain/sorted aggregation to get a deterministic test output +SET timescaledb.enable_chunkwise_aggregation = OFF; +-- get results for all the queries +-- run queries on uncompressed hypertable and store result +\set PREFIX '' +\set PREFIX_VERBOSE '' +\set ECHO none + table_name + i4418_1 +(1 row) + + table_name + i4418_2 +(1 row) + +QUERY PLAN + Sort (actual rows=20 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, tbl1_1."time")) + Sort Method: quicksort + -> HashAggregate (actual rows=20 loops=1) + Group Key: time_bucket('@ 1 day'::interval, tbl1_1."time") + Batches: 1 + -> Merge Join (actual rows=9121 loops=1) + Merge Cond: ((tbl1_1.device = tbl2_1.device) AND (tbl1_1."time" = tbl2_1."time")) + -> Sort (actual rows=9121 loops=1) + Sort Key: tbl1_1.device, tbl1_1."time" + Sort Method: quicksort + -> Append (actual rows=9121 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl1_1 (actual rows=1300 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl1_2 (actual rows=3360 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl1_3 (actual rows=3360 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl1_4 (actual rows=1101 loops=1) + -> Sort (actual rows=9121 loops=1) + Sort Key: tbl2_1.device, tbl2_1."time" + Sort Method: quicksort + -> Append (actual rows=9121 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl2_1 (actual rows=1300 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl2_2 (actual rows=3360 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl2_3 (actual rows=3360 loops=1) + -> Seq Scan on _hyper_X_X_chunk tbl2_4 (actual rows=1101 loops=1) +(24 rows) + diff --git a/tsl/test/shared/expected/space_constraint-16.out b/tsl/test/shared/expected/space_constraint-16.out new file mode 100644 index 00000000000..36adc1b08b2 --- /dev/null +++ b/tsl/test/shared/expected/space_constraint-16.out @@ -0,0 +1,888 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+SET timescaledb.enable_chunk_append TO false; +SET timescaledb.enable_constraint_aware_append TO false; +SET timescaledb.current_timestamp_mock TO '1990-01-01'; +\set PREFIX 'EXPLAIN (COSTS OFF, SUMMARY OFF, TIMING OFF)' +-- test SELECT FOR UPDATE +:PREFIX SELECT FROM metrics_space WHERE device_id = 1 FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = 1) +(10 rows) + +:PREFIX SELECT FROM metrics_space WHERE device_id IN (1) FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = 1) +(10 rows) + +:PREFIX SELECT FROM metrics_space WHERE device_id IN (1,3) FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_5 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_6 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_7 + Filter: (device_id = ANY ('{1,3}'::integer[])) +(16 rows) + +-- check mismatching datatypes +:PREFIX SELECT FROM metrics_space WHERE device_id = smallint '2' FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = '2'::smallint) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_2 + Index Cond: (device_id = '2'::smallint) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_3 + Index Cond: (device_id = '2'::smallint) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_4 + Index Cond: (device_id = '2'::smallint) +(10 rows) + +:PREFIX SELECT FROM metrics_space WHERE device_id = int '2' FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_2 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_3 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_4 + Index Cond: (device_id = 2) +(10 rows) + +:PREFIX SELECT FROM metrics_space WHERE device_id = bigint '3' FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = '3'::bigint) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = '3'::bigint) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = 
'3'::bigint) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = '3'::bigint) +(10 rows) + +:PREFIX SELECT FROM metrics_space WHERE device_id IN (smallint '1', smallint '1') FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = ANY ('{1,1}'::integer[])) +(10 rows) + +:PREFIX SELECT FROM metrics_space WHERE device_id IN (int '1', int '1') FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = ANY ('{1,1}'::integer[])) +(10 rows) + +:PREFIX SELECT FROM metrics_space WHERE device_id IN (bigint '1', bigint '1') FOR UPDATE; +QUERY PLAN + LockRows + -> Append + -> Seq Scan on metrics_space metrics_space_1 + Filter: (device_id = ANY ('{1,1}'::bigint[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,1}'::bigint[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,1}'::bigint[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = ANY ('{1,1}'::bigint[])) +(10 rows) + +-- test valid variants we are optimizing +:PREFIX DELETE FROM metrics_space WHERE device_id = 1; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = 1) +(12 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id IN (1); +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = 1) +(12 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id IN (1,1); +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,1}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,1}'::integer[])) +(12 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id IN (1,3); +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 
+ Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_5 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_6 + Filter: (device_id = ANY ('{1,3}'::integer[])) +(21 rows) + +-- test multiple constraints +:PREFIX DELETE FROM metrics_space WHERE device_id = 1 AND device_id = 1; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: ((device_id = 1) AND (_timescaledb_functions.get_partition_hash(device_id) = 242423622)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: ((device_id = 1) AND (_timescaledb_functions.get_partition_hash(device_id) = 242423622)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: ((device_id = 1) AND (_timescaledb_functions.get_partition_hash(device_id) = 242423622)) +(12 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id = 1 AND device_id = 2; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + -> Result + One-Time Filter: false +(4 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id = 1 OR device_id = 2; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + Delete on _hyper_X_X_chunk metrics_space_7 + Delete on _hyper_X_X_chunk metrics_space_8 + Delete on _hyper_X_X_chunk metrics_space_9 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_2 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_3 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_5 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on 
_hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_6 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Seq Scan on _hyper_X_X_chunk metrics_space_7 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_8 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_9 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) +(60 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id IN (1) OR device_id IN (2); +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + Delete on _hyper_X_X_chunk metrics_space_7 + Delete on _hyper_X_X_chunk metrics_space_8 + Delete on _hyper_X_X_chunk metrics_space_9 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_2 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_3 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_5 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_6 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Seq Scan on _hyper_X_X_chunk metrics_space_7 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_8 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index 
Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_9 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) +(60 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id IN (1) OR device_id = 2; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + Delete on _hyper_X_X_chunk metrics_space_7 + Delete on _hyper_X_X_chunk metrics_space_8 + Delete on _hyper_X_X_chunk metrics_space_9 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_2 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_3 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_5 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_6 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Seq Scan on _hyper_X_X_chunk metrics_space_7 + Filter: ((device_id = 1) OR (device_id = 2)) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_8 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) + -> Bitmap Heap Scan on _hyper_X_X_chunk metrics_space_9 + Recheck Cond: ((device_id = 1) OR (device_id = 2)) + -> BitmapOr + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_X_X_chunk_metrics_space_device_id_time_idx + Index Cond: (device_id = 2) +(60 rows) + +:PREFIX DELETE FROM metrics_space WHERE (time > '2000-01-01' OR device_id = 1) OR (time < '3000-01-01' OR device_id 
= 2); +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + Delete on _hyper_X_X_chunk metrics_space_7 + Delete on _hyper_X_X_chunk metrics_space_8 + Delete on _hyper_X_X_chunk metrics_space_9 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_5 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_6 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_7 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_8 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) + -> Seq Scan on _hyper_X_X_chunk metrics_space_9 + Filter: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) OR (device_id = 1) OR ("time" < 'Wed Jan 01 00:00:00 3000 PST'::timestamp with time zone) OR (device_id = 2)) +(30 rows) + +-- variants we don't optimize +:PREFIX DELETE FROM metrics_space WHERE device_id > 1; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + Delete on _hyper_X_X_chunk metrics_space_7 + Delete on _hyper_X_X_chunk metrics_space_8 + Delete on _hyper_X_X_chunk metrics_space_9 + -> Append + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_1 + Index Cond: (device_id > 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id > 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id > 1) + -> Index Scan using 
_hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_4 + Index Cond: (device_id > 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_5 + Filter: (device_id > 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_6 + Filter: (device_id > 1) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_7 + Index Cond: (device_id > 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_8 + Filter: (device_id > 1) + -> Seq Scan on _hyper_X_X_chunk metrics_space_9 + Filter: (device_id > 1) +(30 rows) + +:PREFIX DELETE FROM metrics_space WHERE device_id < 10; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + Delete on _hyper_X_X_chunk metrics_space_7 + Delete on _hyper_X_X_chunk metrics_space_8 + Delete on _hyper_X_X_chunk metrics_space_9 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_5 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_6 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_7 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_8 + Filter: (device_id < 10) + -> Seq Scan on _hyper_X_X_chunk metrics_space_9 + Filter: (device_id < 10) +(30 rows) + +-- CTE +:PREFIX WITH q1 AS ( + DELETE FROM metrics_space WHERE device_id IN (1,3) RETURNING device_id +) SELECT FROM q1; +QUERY PLAN + CTE Scan on q1 + CTE q1 + -> Custom Scan (HypertableModify) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_5 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_6 + Filter: (device_id = ANY ('{1,3}'::integer[])) +(23 rows) + +:PREFIX WITH q1 AS ( + DELETE FROM metrics_space WHERE device_id = 2 RETURNING device_id +) DELETE FROM metrics_space WHERE device_id IN (1,3) RETURNING device_id; +QUERY PLAN + Custom Scan (HypertableModify) + CTE q1 + -> Custom Scan (HypertableModify) + -> Delete on metrics_space metrics_space_7 + Delete on _hyper_X_X_chunk metrics_space_8 + Delete on _hyper_X_X_chunk metrics_space_9 + Delete on _hyper_X_X_chunk metrics_space_10 + -> Append + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_8 + Index Cond: (device_id = 2) + -> Index Scan using 
_hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_9 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk metrics_space_10 + Index Cond: (device_id = 2) + -> Delete on metrics_space + Delete on _hyper_X_X_chunk metrics_space_1 + Delete on _hyper_X_X_chunk metrics_space_2 + Delete on _hyper_X_X_chunk metrics_space_3 + Delete on _hyper_X_X_chunk metrics_space_4 + Delete on _hyper_X_X_chunk metrics_space_5 + Delete on _hyper_X_X_chunk metrics_space_6 + -> Append + -> Seq Scan on _hyper_X_X_chunk metrics_space_1 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_2 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_3 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_4 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_5 + Filter: (device_id = ANY ('{1,3}'::integer[])) + -> Seq Scan on _hyper_X_X_chunk metrics_space_6 + Filter: (device_id = ANY ('{1,3}'::integer[])) +(34 rows) + +-- JOIN +:PREFIX DELETE FROM metrics_space m1 using metrics_space m2 WHERE m1.device_id = 1 AND m2.device_id = 2; +QUERY PLAN + Custom Scan (HypertableModify) + -> Delete on metrics_space m1 + Delete on _hyper_X_X_chunk m1_1 + Delete on _hyper_X_X_chunk m1_2 + Delete on _hyper_X_X_chunk m1_3 + -> Nested Loop + -> Append + -> Seq Scan on _hyper_X_X_chunk m1_1 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk m1_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk m1_3 + Filter: (device_id = 1) + -> Materialize + -> Append + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m2_1 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m2_2 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m2_3 + Index Cond: (device_id = 2) +(21 rows) + +:PREFIX UPDATE metrics_space m1 set v0 = 0.1 FROM metrics_space m2 WHERE m2.device_id=1 AND m1.device_id=2; +QUERY PLAN + Custom Scan (HypertableModify) + -> Update on metrics_space m1 + Update on _hyper_X_X_chunk m1_1 + Update on _hyper_X_X_chunk m1_2 + Update on _hyper_X_X_chunk m1_3 + -> Nested Loop + -> Append + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m1_1 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m1_2 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m1_3 + Index Cond: (device_id = 2) + -> Materialize + -> Append + -> Seq Scan on _hyper_X_X_chunk m2_1 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk m2_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_X_X_chunk m2_3 + Filter: (device_id = 1) +(21 rows) + +-- test multiple space dimensions and different datatypes +CREATE TABLE space_constraint(time timestamptz, s1 text, s2 numeric, s3 int, v float); +SELECT table_name FROM create_hypertable('space_constraint','time'); +NOTICE: adding not-null constraint to column "time" + table_name + space_constraint +(1 row) + +SELECT column_name FROM add_dimension('space_constraint','s1',number_partitions:=3); + column_name + s1 +(1 row) + +SELECT column_name FROM 
add_dimension('space_constraint','s2',number_partitions:=3); + column_name + s2 +(1 row) + +SELECT column_name FROM add_dimension('space_constraint','s3',number_partitions:=3); + column_name + s3 +(1 row) + +INSERT INTO space_constraint +SELECT t,s1,s2,s3,0.12345 FROM + (VALUES ('2000-01-01'::timestamptz),('2001-01-01')) v1(t), + (VALUES ('s1_1'),('s1_2')) v2(s1), + (VALUES (1.23),(4.56)) v3(s2), + (VALUES (1),(2)) v4(s3); +\set PREFIX 'EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)' +BEGIN; +:PREFIX DELETE FROM space_constraint WHERE s1 = 's1_2'; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on space_constraint (actual rows=0 loops=1) + Delete on _hyper_X_X_chunk space_constraint_1 + Delete on _hyper_X_X_chunk space_constraint_2 + Delete on _hyper_X_X_chunk space_constraint_3 + Delete on _hyper_X_X_chunk space_constraint_4 + Delete on _hyper_X_X_chunk space_constraint_5 + Delete on _hyper_X_X_chunk space_constraint_6 + Delete on _hyper_X_X_chunk space_constraint_7 + Delete on _hyper_X_X_chunk space_constraint_8 + -> Append (actual rows=8 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_3 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_4 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_5 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_6 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_7 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_8 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) +(27 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM space_constraint WHERE s1 = 's1_2' AND s2 = 1.23 AND s3 = 2; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on space_constraint (actual rows=0 loops=1) + Delete on _hyper_X_X_chunk space_constraint_1 + Delete on _hyper_X_X_chunk space_constraint_2 + -> Append (actual rows=2 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: ((s1 = 's1_2'::text) AND (s2 = 1.23) AND (s3 = 2)) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=1 loops=1) + Filter: ((s1 = 's1_2'::text) AND (s2 = 1.23) AND (s3 = 2)) +(9 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM space_constraint WHERE time > '2000-06-01' AND s1 = 's1_2' AND s2 = 1.23 AND s3 = 2; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on space_constraint (actual rows=0 loops=1) + Delete on _hyper_X_X_chunk space_constraint_1 + -> Index Scan using _hyper_X_X_chunk_space_constraint_time_idx on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Index Cond: ("time" > 'Thu Jun 01 00:00:00 2000 PDT'::timestamp with time zone) + Filter: ((s1 = 's1_2'::text) AND (s2 = 1.23) AND (s3 = 2)) +(6 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM space_constraint WHERE s1 IN ('s1_2','s1_2') AND s2 = 1.23 AND s3 = 2; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on space_constraint (actual rows=0 loops=1) + Delete on _hyper_X_X_chunk space_constraint_1 + Delete on _hyper_X_X_chunk space_constraint_2 + -> Append (actual 
rows=2 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_2,s1_2}'::text[])) AND (s2 = 1.23) AND (s3 = 2)) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_2,s1_2}'::text[])) AND (s2 = 1.23) AND (s3 = 2)) +(9 rows) + +ROLLBACK; +BEGIN; +:PREFIX DELETE FROM space_constraint WHERE s1 IN ('s1_1','s1_3') AND s2 IN (1.23,4.44) AND s3 IN (1,100); +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on space_constraint (actual rows=0 loops=1) + Delete on _hyper_X_X_chunk space_constraint_1 + Delete on _hyper_X_X_chunk space_constraint_2 + Delete on _hyper_X_X_chunk space_constraint_3 + Delete on _hyper_X_X_chunk space_constraint_4 + -> Append (actual rows=2 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=0 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_X_X_chunk space_constraint_3 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + -> Seq Scan on _hyper_X_X_chunk space_constraint_4 (actual rows=0 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + Rows Removed by Filter: 1 +(17 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE space_constraint SET v=0.7 WHERE s1 = 's1_2'; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on space_constraint (actual rows=0 loops=1) + Update on _hyper_X_X_chunk space_constraint_1 + Update on _hyper_X_X_chunk space_constraint_2 + Update on _hyper_X_X_chunk space_constraint_3 + Update on _hyper_X_X_chunk space_constraint_4 + Update on _hyper_X_X_chunk space_constraint_5 + Update on _hyper_X_X_chunk space_constraint_6 + Update on _hyper_X_X_chunk space_constraint_7 + Update on _hyper_X_X_chunk space_constraint_8 + -> Result (actual rows=8 loops=1) + -> Append (actual rows=8 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_3 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_4 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_5 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_6 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_7 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) + -> Seq Scan on _hyper_X_X_chunk space_constraint_8 (actual rows=1 loops=1) + Filter: (s1 = 's1_2'::text) +(28 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE space_constraint SET v=0.7 WHERE s1 = 's1_2' AND s2 = 1.23 AND s3 = 2; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on space_constraint (actual rows=0 loops=1) + Update on _hyper_X_X_chunk space_constraint_1 + Update on _hyper_X_X_chunk 
space_constraint_2 + -> Result (actual rows=2 loops=1) + -> Append (actual rows=2 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: ((s1 = 's1_2'::text) AND (s2 = 1.23) AND (s3 = 2)) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=1 loops=1) + Filter: ((s1 = 's1_2'::text) AND (s2 = 1.23) AND (s3 = 2)) +(10 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE space_constraint SET v=0.7 WHERE time > '2000-06-01' AND s1 = 's1_2' AND s2 = 1.23 AND s3 = 2; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on space_constraint (actual rows=0 loops=1) + Update on _hyper_X_X_chunk space_constraint_1 + -> Result (actual rows=1 loops=1) + -> Index Scan using _hyper_X_X_chunk_space_constraint_time_idx on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Index Cond: ("time" > 'Thu Jun 01 00:00:00 2000 PDT'::timestamp with time zone) + Filter: ((s1 = 's1_2'::text) AND (s2 = 1.23) AND (s3 = 2)) +(7 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE space_constraint SET v=0.7 WHERE s1 IN ('s1_2','s1_2') AND s2 = 1.23 AND s3 = 2; +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on space_constraint (actual rows=0 loops=1) + Update on _hyper_X_X_chunk space_constraint_1 + Update on _hyper_X_X_chunk space_constraint_2 + -> Result (actual rows=2 loops=1) + -> Append (actual rows=2 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_2,s1_2}'::text[])) AND (s2 = 1.23) AND (s3 = 2)) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_2,s1_2}'::text[])) AND (s2 = 1.23) AND (s3 = 2)) +(10 rows) + +ROLLBACK; +BEGIN; +:PREFIX UPDATE space_constraint SET v=0.7 WHERE s1 IN ('s1_1','s1_3') AND s2 IN (1.23,4.44) AND s3 IN (1,100); +QUERY PLAN + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Update on space_constraint (actual rows=0 loops=1) + Update on _hyper_X_X_chunk space_constraint_1 + Update on _hyper_X_X_chunk space_constraint_2 + Update on _hyper_X_X_chunk space_constraint_3 + Update on _hyper_X_X_chunk space_constraint_4 + -> Result (actual rows=2 loops=1) + -> Append (actual rows=2 loops=1) + -> Seq Scan on _hyper_X_X_chunk space_constraint_1 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + -> Seq Scan on _hyper_X_X_chunk space_constraint_2 (actual rows=0 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_X_X_chunk space_constraint_3 (actual rows=1 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + -> Seq Scan on _hyper_X_X_chunk space_constraint_4 (actual rows=0 loops=1) + Filter: ((s1 = ANY ('{s1_1,s1_3}'::text[])) AND (s2 = ANY ('{1.23,4.44}'::numeric[])) AND (s3 = ANY ('{1,100}'::integer[]))) + Rows Removed by Filter: 1 +(18 rows) + +ROLLBACK; +DROP TABLE space_constraint; diff --git a/tsl/test/shared/expected/transparent_decompress_chunk-16.out b/tsl/test/shared/expected/transparent_decompress_chunk-16.out new file mode 100644 index 00000000000..4831c52fc9a --- /dev/null +++ b/tsl/test/shared/expected/transparent_decompress_chunk-16.out @@ -0,0 +1,1052 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_VERBOSE 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)' +\set PREFIX_NO_ANALYZE 'EXPLAIN (verbose, costs off)' +\set PREFIX_NO_VERBOSE 'EXPLAIN (costs off)' +SELECT show_chunks('metrics_compressed') AS "TEST_TABLE" ORDER BY 1::text LIMIT 1 \gset +-- this should use DecompressChunk node +:PREFIX_VERBOSE +SELECT * FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time LIMIT 5; +QUERY PLAN + Limit (actual rows=5 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id, _hyper_X_X_chunk.v0, _hyper_X_X_chunk.v1, _hyper_X_X_chunk.v2, _hyper_X_X_chunk.v3 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=5 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id, _hyper_X_X_chunk.v0, _hyper_X_X_chunk.v1, _hyper_X_X_chunk.v2, _hyper_X_X_chunk.v3 + Bulk Decompression: true + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=1 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) +(8 rows) + +-- must not use DecompressChunk node +:PREFIX_VERBOSE +SELECT * FROM ONLY :TEST_TABLE WHERE device_id = 1 ORDER BY time LIMIT 5; +QUERY PLAN + Limit (actual rows=0 loops=1) + Output: "time", device_id, v0, v1, v2, v3 + -> Sort (actual rows=0 loops=1) + Output: "time", device_id, v0, v1, v2, v3 + Sort Key: _hyper_X_X_chunk."time" + Sort Method: quicksort + -> Seq Scan on _timescaledb_internal._hyper_X_X_chunk (actual rows=0 loops=1) + Output: "time", device_id, v0, v1, v2, v3 + Filter: (_hyper_X_X_chunk.device_id = 1) +(9 rows) + +-- test expressions +:PREFIX +SELECT time_bucket ('1d', time), + v1 + v2 AS "sum", + COALESCE(NULL, v1, v2) AS "coalesce", + NULL AS "NULL", + 'text' AS "text", + t AS "RECORD" +FROM :TEST_TABLE t +WHERE device_id IN (1, 2) +ORDER BY time, device_id; +QUERY PLAN + Sort (actual rows=7196 loops=1) + Sort Key: t."time", t.device_id + Sort Method: quicksort + -> Result (actual rows=7196 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk t (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(8 rows) + +-- test empty targetlist +:PREFIX SELECT FROM :TEST_TABLE; +QUERY PLAN + Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(2 rows) + +-- test empty resultset +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id < 0; +QUERY PLAN + Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (device_id < 0) + Rows Removed by Filter: 20 +(4 rows) + +-- test targetlist not referencing columns +:PREFIX SELECT 1 FROM :TEST_TABLE; +QUERY PLAN + Result (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual 
rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(3 rows) + +-- test constraints not present in targetlist +:PREFIX SELECT v1 FROM :TEST_TABLE WHERE device_id = 1 ORDER BY v1; +QUERY PLAN + Sort (actual rows=3598 loops=1) + Sort Key: _hyper_X_X_chunk.v1 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) +(6 rows) + +-- test order not present in targetlist +:PREFIX SELECT v2 FROM :TEST_TABLE WHERE device_id = 1 ORDER BY v1; +QUERY PLAN + Sort (actual rows=3598 loops=1) + Sort Key: _hyper_X_X_chunk.v1 + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) +(6 rows) + +-- test column with all NULL +:PREFIX SELECT v3 FROM :TEST_TABLE WHERE device_id = 1; +QUERY PLAN + Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) +(3 rows) + +-- test qual pushdown +-- v3 is not segment by or order by column so should not be pushed down +:PREFIX_VERBOSE SELECT * FROM :TEST_TABLE WHERE v3 > 10.0 ORDER BY time, device_id; +QUERY PLAN + Sort (actual rows=0 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id, _hyper_X_X_chunk.v0, _hyper_X_X_chunk.v1, _hyper_X_X_chunk.v2, _hyper_X_X_chunk.v3 + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=0 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id, _hyper_X_X_chunk.v0, _hyper_X_X_chunk.v1, _hyper_X_X_chunk.v2, _hyper_X_X_chunk.v3 + Vectorized Filter: (_hyper_X_X_chunk.v3 > '10'::double precision) + Rows Removed by Filter: 17990 + Bulk Decompression: true + -> Seq Scan on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=20 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 +(11 rows) + +-- device_id constraint should be pushed down +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) +(4 rows) + +-- test IS NULL / IS NOT NULL +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id IS NOT NULL ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq 
Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + Filter: (device_id IS NOT NULL) +(7 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id IS NULL ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=0 loops=1) + Index Cond: (device_id IS NULL) +(7 rows) + +-- test IN (Const,Const) +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id IN (1, 2) ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(8 rows) + +-- test cast pushdown +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = '1'::text::int ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) +(4 rows) + +--test var op var +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = v0 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (device_id = v0) + Rows Removed by Filter: 17990 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id < v1 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + Filter: (device_id < v1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(7 rows) + +-- test expressions +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = 1 + 4 / 2 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 3) +(4 rows) + +-- test function calls +-- not yet pushed down +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = length(substring(version(), 1, 3)) ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + Filter: (device_id = length("substring"(version(), 1, 3))) + Rows Removed by Filter: 2392 + -> Sort (actual rows=6 loops=1) + Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 
rows) + +-- test segment meta pushdown +-- order by column and const +:PREFIX SELECT * FROM :TEST_TABLE WHERE time = '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5 loops=1) + Vectorized Filter: ("time" = 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 2985 + -> Sort (actual rows=5 loops=1) + Sort Key: compress_hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: ((_ts_meta_min_1 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) AND (_ts_meta_max_1 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone)) + Rows Removed by Filter: 15 +(10 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE time < '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=150 loops=1) + Vectorized Filter: ("time" < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 2840 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_min_1 < 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 15 +(10 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE time <= '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=155 loops=1) + Vectorized Filter: ("time" <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 2835 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=5 loops=1) + Filter: (_ts_meta_min_1 <= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 15 +(10 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE time >= '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17840 loops=1) + Vectorized Filter: ("time" >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 150 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + Filter: (_ts_meta_max_1 >= 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(9 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE time > '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17835 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 155 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(9 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE '2000-01-01 1:00:00+0' < time ORDER BY time, device_id LIMIT 
10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17835 loops=1) + Vectorized Filter: ("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 155 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + Filter: (_ts_meta_max_1 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(9 rows) + +--pushdowns between order by and segment by columns +:PREFIX SELECT * FROM :TEST_TABLE WHERE v0 < 1 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Vectorized Filter: (v0 < 1) + Rows Removed by Filter: 17990 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE v0 < device_id ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (v0 < device_id) + Rows Removed by Filter: 17990 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id < v0 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + Filter: (device_id < v0) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(7 rows) + +:PREFIX SELECT * FROM :TEST_TABLE WHERE v1 = device_id ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (v1 = device_id) + Rows Removed by Filter: 17990 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 rows) + +--pushdown between two order by column (not pushed down) +:PREFIX SELECT * FROM :TEST_TABLE WHERE v0 = v1 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=0 loops=1) + -> Sort (actual rows=0 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=0 loops=1) + Filter: (v0 = v1) + Rows Removed by Filter: 17990 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 rows) + +--pushdown of quals on order by and segment by cols anded together +:PREFIX_VERBOSE SELECT * FROM :TEST_TABLE WHERE time > '2000-01-01 1:00:00+0' AND device_id = 1 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id, _hyper_X_X_chunk.v0, _hyper_X_X_chunk.v1, _hyper_X_X_chunk.v2, _hyper_X_X_chunk.v3 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=10 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id, 
_hyper_X_X_chunk.v0, _hyper_X_X_chunk.v1, _hyper_X_X_chunk.v2, _hyper_X_X_chunk.v3 + Vectorized Filter: (_hyper_X_X_chunk."time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) + Rows Removed by Filter: 31 + Bulk Decompression: true + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=1 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) + Filter: (compress_hyper_X_X_chunk._ts_meta_max_1 > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) +(11 rows) + +--pushdown of quals on order by and segment by cols or together (not pushed down) +:PREFIX SELECT * FROM :TEST_TABLE WHERE time > '2000-01-01 1:00:00+0' OR device_id = 1 ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17866 loops=1) + Filter: (("time" > 'Fri Dec 31 17:00:00 1999 PST'::timestamp with time zone) OR (device_id = 1)) + Rows Removed by Filter: 124 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 rows) + +--functions not yet optimized +:PREFIX SELECT * FROM :TEST_TABLE WHERE time < now() ORDER BY time, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + Filter: ("time" < now()) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(7 rows) + +-- test sort optimization interaction +:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Sort (actual rows=6 loops=1) + Sort Key: compress_hyper_X_X_chunk._ts_meta_max_1 DESC + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(6 rows) + +:PREFIX SELECT time, device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_X_X_chunk."time" DESC, _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(6 rows) + +:PREFIX SELECT time, device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) +(3 rows) + +-- test aggregate +:PREFIX SELECT count(*) FROM :TEST_TABLE; +QUERY PLAN + Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on 
compress_hyper_X_X_chunk (actual rows=20 loops=1) +(3 rows) + +-- test aggregate with GROUP BY +:PREFIX SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; +QUERY PLAN + Sort (actual rows=5 loops=1) + Sort Key: _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> HashAggregate (actual rows=5 loops=1) + Group Key: _hyper_X_X_chunk.device_id + Batches: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(8 rows) + +-- test window functions with GROUP BY +:PREFIX SELECT sum(count(*)) OVER () FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; +QUERY PLAN + Sort (actual rows=5 loops=1) + Sort Key: _hyper_X_X_chunk.device_id + Sort Method: quicksort + -> WindowAgg (actual rows=5 loops=1) + -> HashAggregate (actual rows=5 loops=1) + Group Key: _hyper_X_X_chunk.device_id + Batches: 1 + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(9 rows) + +-- test CTE +:PREFIX WITH q AS ( + SELECT v1 FROM :TEST_TABLE ORDER BY time +) +SELECT * FROM q ORDER BY v1; +QUERY PLAN + Sort (actual rows=17990 loops=1) + Sort Key: q.v1 + Sort Method: quicksort + -> Subquery Scan on q (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Sort (actual rows=20 loops=1) + Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1 + Sort Method: quicksort + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) +(9 rows) + +-- test CTE join +:PREFIX WITH q1 AS ( + SELECT time, v1 FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time +), +q2 AS ( + SELECT time, v2 FROM :TEST_TABLE WHERE device_id = 2 ORDER BY time +) +SELECT * FROM q1 INNER JOIN q2 ON q1.time = q2.time ORDER BY q1.time; +QUERY PLAN + Merge Join (actual rows=3598 loops=1) + Merge Cond: (_hyper_X_X_chunk."time" = _hyper_X_X_chunk_1."time") + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) + -> Materialize (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk _hyper_X_X_chunk_1 (actual rows=3598 loops=1) + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=4 loops=1) + Index Cond: (device_id = 2) +(9 rows) + +-- test indexes +SET enable_seqscan TO FALSE; +-- IndexScans should work +:PREFIX_VERBOSE SELECT time, device_id FROM :TEST_TABLE WHERE device_id = 1 ORDER BY device_id, time; +QUERY PLAN + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id + Bulk Decompression: true + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: 
(compress_hyper_X_X_chunk.device_id = 1) +(6 rows) + +-- globs should not plan IndexOnlyScans +:PREFIX_VERBOSE SELECT * FROM :TEST_TABLE WHERE device_id = 1 ORDER BY device_id, time; +QUERY PLAN + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=1) + Output: _hyper_X_X_chunk."time", _hyper_X_X_chunk.device_id, _hyper_X_X_chunk.v0, _hyper_X_X_chunk.v1, _hyper_X_X_chunk.v2, _hyper_X_X_chunk.v3 + Bulk Decompression: true + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) +(6 rows) + +-- whole row reference should work +:PREFIX_VERBOSE SELECT test_table FROM :TEST_TABLE AS test_table WHERE device_id = 1 ORDER BY device_id, time; +QUERY PLAN + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk test_table (actual rows=3598 loops=1) + Output: test_table.*, test_table.device_id, test_table."time" + Bulk Decompression: true + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) +(6 rows) + +-- even when we select only a segmentby column, we still need count +:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id = 1 ORDER BY device_id; +QUERY PLAN + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=1) + Output: _hyper_X_X_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) +(6 rows) + +:PREFIX_VERBOSE SELECT count(*) FROM :TEST_TABLE WHERE device_id = 1; +QUERY PLAN + Aggregate (actual rows=1 loops=1) + Output: count(*) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=1) + Bulk Decompression: false + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, 
compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) +(7 rows) + +--ensure that we can get a nested loop +SET enable_seqscan TO TRUE; +SET enable_hashjoin TO FALSE; +:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN ( VALUES (1)); +QUERY PLAN + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=1) + Output: _hyper_X_X_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) +(6 rows) + +--with multiple values can get a nested loop. +:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN ( VALUES (1), (2)); +QUERY PLAN + Nested Loop (actual rows=7196 loops=1) + Output: _hyper_X_X_chunk.device_id + -> Unique (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Sort (actual rows=2 loops=1) + Output: "*VALUES*".column1 + Sort Key: "*VALUES*".column1 + Sort Method: quicksort + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=2) + Output: _hyper_X_X_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=2) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = "*VALUES*".column1) +(16 rows) + +RESET enable_hashjoin; +:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN (VALUES (1)); +QUERY PLAN + Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=1) + Output: _hyper_X_X_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=1) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = 1) +(6 rows) + +--with multiple values can get a semi-join or nested loop depending on seq_page_cost. 
+:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN (VALUES (1), (2)); +QUERY PLAN + Nested Loop (actual rows=7196 loops=1) + Output: _hyper_X_X_chunk.device_id + -> Unique (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Sort (actual rows=2 loops=1) + Output: "*VALUES*".column1 + Sort Key: "*VALUES*".column1 + Sort Method: quicksort + -> Values Scan on "*VALUES*" (actual rows=2 loops=1) + Output: "*VALUES*".column1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk (actual rows=3598 loops=2) + Output: _hyper_X_X_chunk.device_id + Bulk Decompression: false + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk (actual rows=4 loops=2) + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = "*VALUES*".column1) +(16 rows) + +SET seq_page_cost = 100; +-- loop/row counts of this query is different on windows so we run it without analyze +:PREFIX_NO_ANALYZE SELECT device_id FROM :TEST_TABLE WHERE device_id IN (VALUES (1), (2)); +QUERY PLAN + Nested Loop + Output: _hyper_X_X_chunk.device_id + -> Unique + Output: "*VALUES*".column1 + -> Sort + Output: "*VALUES*".column1 + Sort Key: "*VALUES*".column1 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_X_X_chunk + Output: _hyper_X_X_chunk.device_id + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on _timescaledb_internal.compress_hyper_X_X_chunk + Output: compress_hyper_X_X_chunk."time", compress_hyper_X_X_chunk.device_id, compress_hyper_X_X_chunk.v0, compress_hyper_X_X_chunk.v1, compress_hyper_X_X_chunk.v2, compress_hyper_X_X_chunk.v3, compress_hyper_X_X_chunk._ts_meta_count, compress_hyper_X_X_chunk._ts_meta_sequence_num, compress_hyper_X_X_chunk._ts_meta_min_1, compress_hyper_X_X_chunk._ts_meta_max_1 + Index Cond: (compress_hyper_X_X_chunk.device_id = "*VALUES*".column1) +(14 rows) + +RESET seq_page_cost; +-- test view +CREATE OR REPLACE VIEW compressed_view AS SELECT time, device_id, v1, v2 FROM :TEST_TABLE; +:PREFIX SELECT * FROM compressed_view WHERE device_id = 1 ORDER BY time DESC LIMIT 10; +QUERY PLAN + Limit (actual rows=10 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=1 loops=1) + Index Cond: (device_id = 1) +(4 rows) + +DROP VIEW compressed_view; +SET parallel_leader_participation TO off; +SET min_parallel_table_scan_size TO '0'; +-- test INNER JOIN +:PREFIX_NO_VERBOSE +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = m2.device_id + ORDER BY m1.time, + m1.device_id + LIMIT 10; +QUERY PLAN + Limit + -> Nested Loop + -> Gather Merge + Workers Planned: 1 + -> Sort + Sort Key: m1."time", m1.device_id + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 + -> Parallel Seq Scan on compress_hyper_X_X_chunk + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2 + Filter: (m1."time" = "time") + -> Index Scan using 
compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 + Index Cond: (device_id = m1.device_id) +(12 rows) + +:PREFIX_NO_VERBOSE +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + INNER JOIN :TEST_TABLE m3 ON m2.time = m3.time + AND m1.device_id = m2.device_id + AND m3.device_id = 3 + ORDER BY m1.time, + m1.device_id + LIMIT 10; +QUERY PLAN + Limit + -> Nested Loop + -> Nested Loop + Join Filter: (m1."time" = m3."time") + -> Gather Merge + Workers Planned: 1 + -> Sort + Sort Key: m1."time", m1.device_id + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 + -> Parallel Seq Scan on compress_hyper_X_X_chunk + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m3 + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_2 + Index Cond: (device_id = 3) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2 + Filter: (m1."time" = "time") + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 + Index Cond: (device_id = m1.device_id) +(17 rows) + +RESET min_parallel_table_scan_size; +:PREFIX_NO_VERBOSE +SELECT * +FROM :TEST_TABLE m1 + INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 + ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id + LIMIT 100; +QUERY PLAN + Limit + -> Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk + Index Cond: (device_id = 1) + -> Materialize + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2 + -> Index Scan Backward using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 + Index Cond: (device_id = 2) +(10 rows) + +:PREFIX_NO_VERBOSE +SELECT * +FROM metrics m1 + INNER JOIN metrics_space m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 + ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id + LIMIT 100; +QUERY PLAN + Limit + -> Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics m1 + Order: m1."time" + -> Index Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk m1_1 + Index Cond: (device_id = 1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk m1_2 + Index Cond: (device_id = 1) + -> Index Scan Backward using _hyper_X_X_chunk_metrics_device_id_time_idx on _hyper_X_X_chunk m1_3 + Index Cond: (device_id = 1) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_space m2 + Order: m2."time" + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m2_1 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m2_2 + Index Cond: (device_id = 2) + -> Index Scan using _hyper_X_X_chunk_metrics_space_device_id_time_idx on _hyper_X_X_chunk m2_3 + Index Cond: (device_id = 2) +(20 rows) + +-- test OUTER JOIN +SET min_parallel_table_scan_size TO '0'; +:PREFIX_NO_VERBOSE +SELECT * +FROM :TEST_TABLE m1 + LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = m2.device_id +ORDER BY m1.time, + m1.device_id +LIMIT 10; +QUERY PLAN + Limit + -> Nested Loop Left Join + 
Join Filter: ((m1."time" = m2."time") AND (m1.device_id = m2.device_id)) + -> Gather Merge + Workers Planned: 1 + -> Sort + Sort Key: m1."time", m1.device_id + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 + -> Parallel Seq Scan on compress_hyper_X_X_chunk + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2 + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 +(11 rows) + +RESET min_parallel_table_scan_size; +:PREFIX_NO_VERBOSE +SELECT * +FROM :TEST_TABLE m1 + LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time + AND m1.device_id = 1 + AND m2.device_id = 2 +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 100; +QUERY PLAN + Limit + -> Incremental Sort + Sort Key: m1."time", m1.device_id, m2."time", m2.device_id + Presorted Key: m1."time" + -> Merge Left Join + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1.device_id = 1) + -> Sort + Sort Key: m1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 + -> Seq Scan on compress_hyper_X_X_chunk + -> Sort + Sort Key: m2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2 + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 + Index Cond: (device_id = 2) +(16 rows) + +-- test implicit self-join +:PREFIX_NO_VERBOSE +SELECT * +FROM :TEST_TABLE m1, + :TEST_TABLE m2 +WHERE m1.time = m2.time +ORDER BY m1.time, + m1.device_id, + m2.time, + m2.device_id +LIMIT 20; +QUERY PLAN + Limit + -> Incremental Sort + Sort Key: m1."time", m1.device_id, m2.device_id + Presorted Key: m1."time" + -> Merge Join + Merge Cond: (m1."time" = m2."time") + -> Sort + Sort Key: m1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 + -> Seq Scan on compress_hyper_X_X_chunk + -> Sort + Sort Key: m2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2 + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 +(14 rows) + +-- test self-join with sub-query +:PREFIX_NO_VERBOSE +SELECT * +FROM ( + SELECT * + FROM :TEST_TABLE m1) m1 + INNER JOIN ( + SELECT * + FROM :TEST_TABLE m2) m2 ON m1.time = m2.time +ORDER BY m1.time, + m1.device_id, + m2.device_id +LIMIT 10; +QUERY PLAN + Limit + -> Incremental Sort + Sort Key: m1."time", m1.device_id, m2.device_id + Presorted Key: m1."time" + -> Merge Join + Merge Cond: (m1."time" = m2."time") + -> Sort + Sort Key: m1."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 + -> Seq Scan on compress_hyper_X_X_chunk + -> Sort + Sort Key: m2."time" + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2 + -> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 +(14 rows) + +RESET parallel_leader_participation; +:PREFIX +SELECT * +FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time) + INNER JOIN LATERAL ( + SELECT time + FROM :TEST_TABLE m1 + WHERE m1.time = g.time + LIMIT 1) m1 ON TRUE; +QUERY PLAN + Nested Loop (actual rows=5 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=0 loops=32) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 (actual rows=0 loops=32) + Filter: ("time" = g."time") + Rows Removed by Filter: 81 + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=0 loops=32) + Filter: ((_ts_meta_min_1 <= g."time") AND (_ts_meta_max_1 >= g."time")) + Rows Removed by Filter: 17 +(9 rows) + +-- test prepared statement +SET plan_cache_mode TO force_generic_plan; +PREPARE prep AS SELECT count(time) FROM :TEST_TABLE 
WHERE device_id = 1; +:PREFIX EXECUTE prep; +QUERY PLAN + Aggregate (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Index Cond: (device_id = 1) +(4 rows) + +EXECUTE prep; + count + 3598 +(1 row) + +EXECUTE prep; + count + 3598 +(1 row) + +EXECUTE prep; + count + 3598 +(1 row) + +EXECUTE prep; + count + 3598 +(1 row) + +EXECUTE prep; + count + 3598 +(1 row) + +EXECUTE prep; + count + 3598 +(1 row) + +DEALLOCATE prep; +-- test prepared statement with params pushdown +PREPARE param_prep (int) AS +SELECT * +FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time) + INNER JOIN LATERAL ( + SELECT time + FROM :TEST_TABLE m1 + WHERE m1.time = g.time + AND device_id = $1 + LIMIT 1) m1 ON TRUE; +:PREFIX EXECUTE param_prep (1); +QUERY PLAN + Nested Loop (actual rows=5 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=0 loops=32) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 (actual rows=0 loops=32) + Filter: ("time" = g."time") + Rows Removed by Filter: 81 + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=0 loops=32) + Index Cond: (device_id = $1) + Filter: ((_ts_meta_min_1 <= g."time") AND (_ts_meta_max_1 >= g."time")) + Rows Removed by Filter: 4 +(10 rows) + +:PREFIX EXECUTE param_prep (2); +QUERY PLAN + Nested Loop (actual rows=5 loops=1) + -> Function Scan on generate_series g (actual rows=32 loops=1) + -> Limit (actual rows=0 loops=32) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1 (actual rows=0 loops=32) + Filter: ("time" = g."time") + Rows Removed by Filter: 81 + -> Index Scan using compress_hyper_X_X_chunk__compressed_hypertable_4_device_id__t on compress_hyper_X_X_chunk (actual rows=0 loops=32) + Index Cond: (device_id = $1) + Filter: ((_ts_meta_min_1 <= g."time") AND (_ts_meta_max_1 >= g."time")) + Rows Removed by Filter: 4 +(10 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST +(5 rows) + +EXECUTE param_prep (2); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST +(5 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST +(5 rows) + +EXECUTE param_prep (2); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 
PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST +(5 rows) + +EXECUTE param_prep (1); + time | time +------------------------------+------------------------------ + Sat Jan 01 00:00:00 2000 PST | Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST | Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST | Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST | Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST | Wed Jan 05 00:00:00 2000 PST +(5 rows) + +DEALLOCATE param_prep; +RESET plan_cache_mode; diff --git a/tsl/test/sql/.gitignore b/tsl/test/sql/.gitignore index b057ef35718..7c80876e95a 100644 --- a/tsl/test/sql/.gitignore +++ b/tsl/test/sql/.gitignore @@ -1,13 +1,20 @@ /*.pgbinary +/cagg_bgw-*.sql /cagg_ddl-*.sql +/cagg_ddl_dist_ht-*.sql +/cagg_errors_deprecated-*.sql /cagg_invalidation_dist_ht-*.sql /cagg_permissions-*.sql /cagg_query-*.sql +/cagg_repair-*.sql /cagg_union_view-*.sql +/cagg_usage-*.sql +/compression_errors-*.sql /compression_sorted_merge-*.sql /compression_permissions-*.sql /continuous_aggs-*.sql /continuous_aggs_deprecated-*.sql +/deparse-*.sql /dist_grant-*.sql /dist_query-*.sql /dist_hypertable-*.sql diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 640e849f14a..9a623ffee2d 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -54,7 +54,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) chunk_utils_compression.sql compression_algos.sql compression_ddl.sql - compression_errors.sql compression_hypertable.sql compression_merge.sql compression_indexscan.sql @@ -62,27 +61,21 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) compress_sorted_merge_filter.sql compress_table.sql cagg_bgw_drop_chunks.sql - cagg_bgw.sql - cagg_ddl.sql cagg_drop_chunks.sql cagg_dump.sql - cagg_errors_deprecated.sql cagg_joins.sql cagg_migrate.sql cagg_multi.sql cagg_on_cagg.sql cagg_on_cagg_joins.sql cagg_tableam.sql - cagg_usage.sql cagg_policy_run.sql - cagg_repair.sql data_fetcher.sql data_node_bootstrap.sql data_node.sql ddl_hook.sql debug_notice.sql decompress_vector_qual.sql - deparse.sql hypertable_generalization.sql insert_memory_usage.sql information_view_chunk_count.sql @@ -101,7 +94,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) TEST_FILES cagg_bgw_dist_ht.sql cagg_migrate_dist_ht.sql - cagg_ddl_dist_ht.sql cagg_on_cagg_dist_ht.sql cagg_on_cagg_joins_dist_ht.sql dist_api_calls.sql @@ -185,12 +177,24 @@ if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) endif() if(CMAKE_BUILD_TYPE MATCHES Debug) - list(APPEND TEST_TEMPLATES cagg_query.sql.in continuous_aggs.sql.in - continuous_aggs_deprecated.sql.in) + list( + APPEND + TEST_TEMPLATES + cagg_bgw.sql.in + cagg_ddl.sql.in + cagg_errors_deprecated.sql.in + cagg_query.sql.in + cagg_repair.sql.in + cagg_usage.sql.in + compression_errors.sql.in + continuous_aggs.sql.in + continuous_aggs_deprecated.sql.in + deparse.sql.in) if(ENABLE_MULTINODE_TESTS) list( APPEND TEST_TEMPLATES + cagg_ddl_dist_ht.sql.in cagg_invalidation_dist_ht.sql.in dist_hypertable.sql.in dist_grant.sql.in diff --git a/tsl/test/sql/cagg_bgw.sql b/tsl/test/sql/cagg_bgw.sql.in similarity index 100% rename from tsl/test/sql/cagg_bgw.sql rename to tsl/test/sql/cagg_bgw.sql.in diff --git a/tsl/test/sql/cagg_ddl.sql b/tsl/test/sql/cagg_ddl.sql.in similarity index 100% rename from tsl/test/sql/cagg_ddl.sql 
rename to tsl/test/sql/cagg_ddl.sql.in diff --git a/tsl/test/sql/cagg_ddl_dist_ht.sql b/tsl/test/sql/cagg_ddl_dist_ht.sql.in similarity index 100% rename from tsl/test/sql/cagg_ddl_dist_ht.sql rename to tsl/test/sql/cagg_ddl_dist_ht.sql.in diff --git a/tsl/test/sql/cagg_errors_deprecated.sql b/tsl/test/sql/cagg_errors_deprecated.sql.in similarity index 96% rename from tsl/test/sql/cagg_errors_deprecated.sql rename to tsl/test/sql/cagg_errors_deprecated.sql.in index d83312cec34..937df662cd7 100644 --- a/tsl/test/sql/cagg_errors_deprecated.sql +++ b/tsl/test/sql/cagg_errors_deprecated.sql.in @@ -137,6 +137,8 @@ from conditions group by time_bucket('1week', timec) , location WITH NO DATA; ; +-- Starting on PG16 this test will pass because array_agg is parallel safe +-- https://github.com/postgres/postgres/commit/16fd03e956540d1b47b743f6a84f37c54ac93dd4 CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS Select sum(humidity), avg(temperature), array_agg(location) @@ -150,6 +152,8 @@ CREATE AGGREGATE newavg ( finalfunc = int8_avg, initcond1 = '{0,0}' ); + +DROP MATERIALIZED VIEW IF EXISTS mat_m1; CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized = false) AS Select sum(humidity), newavg(temperature::int4) @@ -471,6 +475,12 @@ SELECT time_bucket('1 day', time), COUNT(time) FROM measurements GROUP BY 1 WITH NO DATA; +SELECT ca.mat_hypertable_id AS "MAT_HYPERTABLE_ID" +FROM _timescaledb_catalog.continuous_agg ca +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) +WHERE user_view_name = 'measurements_summary' +\gset + -- First test that add_job checks the config. It is currently possible -- to add non-custom jobs using the add_job function so we need to -- test that the function actually checks the config parameters. These @@ -487,13 +497,13 @@ SELECT add_job( '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, '1 hour'::interval, check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": 11}'); + config => ('{"end_offset": null, "start_offset": "1 fortnight", "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); -- ... 
this one because it has a bad value for end_offset SELECT add_job( '_timescaledb_functions.policy_refresh_continuous_aggregate'::regproc, '1 hour'::interval, check_config => '_timescaledb_functions.policy_refresh_continuous_aggregate_check'::regproc, - config => '{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": 11}'); + config => ('{"end_offset": "chicken", "start_offset": null, "mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||'}')::jsonb); \set ON_ERROR_STOP 1 SELECT add_continuous_aggregate_policy('measurements_summary', NULL, NULL, '1 h'::interval) AS job_id @@ -509,9 +519,9 @@ SELECT * FROM _timescaledb_config.bgw_job WHERE id = :job_id; \set ON_ERROR_STOP 0 SELECT alter_job(:job_id, config => '{"end_offset": "1 week", "start_offset": "2 fortnights"}'); SELECT alter_job(:job_id, - config => '{"mat_hypertable_id": 11, "end_offset": "chicken", "start_offset": "1 fortnights"}'); + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 fortnights"}')::jsonb); SELECT alter_job(:job_id, - config => '{"mat_hypertable_id": 11, "end_offset": "chicken", "start_offset": "1 week"}'); + config => ('{"mat_hypertable_id": '||:'MAT_HYPERTABLE_ID'||', "end_offset": "chicken", "start_offset": "1 week"}')::jsonb); \set ON_ERROR_STOP 1 DROP TABLE measurements CASCADE; diff --git a/tsl/test/sql/cagg_query.sql.in b/tsl/test/sql/cagg_query.sql.in index 2d3ee97ee84..c805af9446b 100644 --- a/tsl/test/sql/cagg_query.sql.in +++ b/tsl/test/sql/cagg_query.sql.in @@ -291,7 +291,8 @@ SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 ON (m1.location = m2.location AND m1.timec = m2.timec) -ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST +ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, + lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST LIMIT 10; ROLLBACK; diff --git a/tsl/test/sql/cagg_repair.sql b/tsl/test/sql/cagg_repair.sql.in similarity index 100% rename from tsl/test/sql/cagg_repair.sql rename to tsl/test/sql/cagg_repair.sql.in diff --git a/tsl/test/sql/cagg_usage.sql b/tsl/test/sql/cagg_usage.sql.in similarity index 100% rename from tsl/test/sql/cagg_usage.sql rename to tsl/test/sql/cagg_usage.sql.in diff --git a/tsl/test/sql/compression_bgw.sql b/tsl/test/sql/compression_bgw.sql index e63bd244823..0c57505dc8c 100644 --- a/tsl/test/sql/compression_bgw.sql +++ b/tsl/test/sql/compression_bgw.sql @@ -161,7 +161,6 @@ SELECT add_compression_policy('test_table_nologin', 2::int); \set ON_ERROR_STOP 1 DROP TABLE test_table_nologin; RESET ROLE; -REVOKE NOLOGIN_ROLE FROM :ROLE_DEFAULT_PERM_USER; \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER diff --git a/tsl/test/sql/compression_errors.sql b/tsl/test/sql/compression_errors.sql.in similarity index 99% rename from tsl/test/sql/compression_errors.sql rename to tsl/test/sql/compression_errors.sql.in index 2df0a672312..f624aea2152 100644 --- a/tsl/test/sql/compression_errors.sql +++ b/tsl/test/sql/compression_errors.sql.in @@ -351,7 +351,7 @@ ALTER TABLE test SET ( --below queries will pass before chunks are compressed SELECT 1 FROM test GROUP BY enum_col; -EXPLAIN SELECT DISTINCT 1 FROM test; +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; --compress chunks SELECT COMPRESS_CHUNK(X) FROM SHOW_CHUNKS('test') X; @@ -359,7 +359,7 @@ ANALYZE test; --below query should pass after chunks are compressed SELECT 1 FROM test GROUP BY enum_col; -EXPLAIN 
SELECT DISTINCT 1 FROM test; +EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT DISTINCT 1 FROM test; --github issue 4398 SELECT format('CREATE TABLE data_table AS SELECT now() AS tm, %s', array_to_string(array_agg(format('125 AS c%s',a)), ', ')) FROM generate_series(1,550)a \gexec diff --git a/tsl/test/sql/deparse.sql b/tsl/test/sql/deparse.sql.in similarity index 100% rename from tsl/test/sql/deparse.sql rename to tsl/test/sql/deparse.sql.in diff --git a/tsl/test/sql/include/cagg_ddl_common.sql b/tsl/test/sql/include/cagg_ddl_common.sql index fabb4f4a8fd..0e8b7939b2d 100644 --- a/tsl/test/sql/include/cagg_ddl_common.sql +++ b/tsl/test/sql/include/cagg_ddl_common.sql @@ -34,6 +34,11 @@ SELECT table_name FROM create_hypertable('conditions', 'timec'); -- schema tests \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- drop if the tablespace1 and/or tablespace2 exists +SET client_min_messages TO error; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +RESET client_min_messages; CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;