From 7dc5317f5ec4d2a7e21836a3ebfe74361f6b3179 Mon Sep 17 00:00:00 2001 From: Jan Nidzwetzki Date: Thu, 23 Nov 2023 08:28:17 +0100 Subject: [PATCH 1/6] Ensure dist_move_chunk is executed as solo test dist_move_chunk is now version specific. However, only the generic version was declared as a solo test. This PR adjusts the test declaration. --- tsl/test/sql/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 2fa1341995d..2c010ead3e8 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -164,7 +164,7 @@ set(SOLO_TESTS if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} EQUAL "15" AND ${PG_VERSION_MINOR} LESS "3") - list(APPEND SOLO_TESTS dist_move_chunk) + list(APPEND SOLO_TESTS dist_move_chunk-${PG_VERSION_MAJOR}) endif() if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") From e9a4f30abf73ae5199793d93235eebc8b15afa82 Mon Sep 17 00:00:00 2001 From: Jan Nidzwetzki Date: Thu, 23 Nov 2023 09:26:10 +0100 Subject: [PATCH 2/6] Make parallel test deterministic One test query of the parallel test did not contain an ORDER BY. Therefore, the test result was not deterministic. This patch adds the missing ORDER BY. --- test/expected/parallel-13.out | 40 ++++++++++++++++++-------------- test/expected/parallel-14.out | 40 ++++++++++++++++++-------------- test/expected/parallel-15.out | 40 ++++++++++++++++++-------------- test/expected/parallel-16.out | 43 ++++++++++++++++++++--------------- test/sql/parallel.sql.in | 8 ++++--- 5 files changed, 99 insertions(+), 72 deletions(-) diff --git a/test/expected/parallel-13.out b/test/expected/parallel-13.out index dddfd7667ea..e960d671655 100644 --- a/test/expected/parallel-13.out +++ b/test/expected/parallel-13.out @@ -285,37 +285,42 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; (1 row) RESET parallel_leader_participation; --- Test parallel chunk append is used +-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append) SET parallel_tuple_cost = 0; -:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- +SET enable_indexscan = OFF; +:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; + QUERY PLAN +---------------------------------------------------------------------------------------------- Sort - Sort Key: test.i, _hyper_1_1_chunk.i - -> Hash Right Join - Hash Cond: (_hyper_1_1_chunk.i = test.i) + Sort Key: test.i, _hyper_1_1_chunk_1.i + -> Merge Left Join + Merge Cond: (test.i = _hyper_1_1_chunk_1.i) -> Limit - -> Gather + -> Gather Merge Workers Planned: 2 - -> Parallel Seq Scan on _hyper_1_1_chunk - Filter: (i < 500000) - -> Hash - -> Limit - -> Gather - Workers Planned: 2 + -> Sort + Sort Key: test.i -> Result One-Time Filter: (length(version()) > 0) -> Parallel Custom Scan (ChunkAppend) on test Chunks excluded during startup: 0 -> Result One-Time Filter: (length(version()) > 0) - -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + -> Parallel Seq Scan on _hyper_1_1_chunk -> Result One-Time Filter: (length(version()) > 0) -> Parallel Seq Scan on 
_hyper_1_2_chunk -(23 rows) + -> Materialize + -> Limit + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: _hyper_1_1_chunk_1.i + -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + Filter: (i < 500000) +(27 rows) -SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; +SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; i | j | ts | i | j | ts ----+------+-----------------------------+----+------+----------------------------- 0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969 @@ -330,6 +335,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 (10 rows) +SET enable_indexscan = ON; -- Test normal chunk append can be used in a parallel worker :PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10; QUERY PLAN diff --git a/test/expected/parallel-14.out b/test/expected/parallel-14.out index dddfd7667ea..e960d671655 100644 --- a/test/expected/parallel-14.out +++ b/test/expected/parallel-14.out @@ -285,37 +285,42 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; (1 row) RESET parallel_leader_participation; --- Test parallel chunk append is used +-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append) SET parallel_tuple_cost = 0; -:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- +SET enable_indexscan = OFF; +:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; + QUERY PLAN +---------------------------------------------------------------------------------------------- Sort - Sort Key: test.i, _hyper_1_1_chunk.i - -> Hash Right Join - Hash Cond: (_hyper_1_1_chunk.i = test.i) + Sort Key: test.i, _hyper_1_1_chunk_1.i + -> Merge Left Join + Merge Cond: (test.i = _hyper_1_1_chunk_1.i) -> Limit - -> Gather + -> Gather Merge Workers Planned: 2 - -> Parallel Seq Scan on _hyper_1_1_chunk - Filter: (i < 500000) - -> Hash - -> Limit - -> Gather - Workers Planned: 2 + -> Sort + Sort Key: test.i -> Result One-Time Filter: (length(version()) > 0) -> Parallel Custom Scan (ChunkAppend) on test Chunks excluded during startup: 0 -> Result One-Time Filter: (length(version()) > 0) - -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + -> Parallel Seq Scan on _hyper_1_1_chunk -> Result One-Time Filter: (length(version()) > 0) -> Parallel Seq Scan on _hyper_1_2_chunk -(23 rows) + -> Materialize + -> Limit + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: _hyper_1_1_chunk_1.i + -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + Filter: (i < 500000) +(27 rows) -SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM 
"test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; +SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; i | j | ts | i | j | ts ----+------+-----------------------------+----+------+----------------------------- 0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969 @@ -330,6 +335,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 (10 rows) +SET enable_indexscan = ON; -- Test normal chunk append can be used in a parallel worker :PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10; QUERY PLAN diff --git a/test/expected/parallel-15.out b/test/expected/parallel-15.out index 34f0e848a3e..4bc5a7a276e 100644 --- a/test/expected/parallel-15.out +++ b/test/expected/parallel-15.out @@ -286,37 +286,42 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; (1 row) RESET parallel_leader_participation; --- Test parallel chunk append is used +-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append) SET parallel_tuple_cost = 0; -:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- +SET enable_indexscan = OFF; +:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; + QUERY PLAN +---------------------------------------------------------------------------------------------- Sort - Sort Key: test.i, _hyper_1_1_chunk.i - -> Hash Right Join - Hash Cond: (_hyper_1_1_chunk.i = test.i) + Sort Key: test.i, _hyper_1_1_chunk_1.i + -> Merge Left Join + Merge Cond: (test.i = _hyper_1_1_chunk_1.i) -> Limit - -> Gather + -> Gather Merge Workers Planned: 2 - -> Parallel Seq Scan on _hyper_1_1_chunk - Filter: (i < 500000) - -> Hash - -> Limit - -> Gather - Workers Planned: 2 + -> Sort + Sort Key: test.i -> Result One-Time Filter: (length(version()) > 0) -> Parallel Custom Scan (ChunkAppend) on test Chunks excluded during startup: 0 -> Result One-Time Filter: (length(version()) > 0) - -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + -> Parallel Seq Scan on _hyper_1_1_chunk -> Result One-Time Filter: (length(version()) > 0) -> Parallel Seq Scan on _hyper_1_2_chunk -(23 rows) + -> Materialize + -> Limit + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: _hyper_1_1_chunk_1.i + -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + Filter: (i < 500000) +(27 rows) -SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; +SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; i | j | ts | i | j | ts 
----+------+-----------------------------+----+------+----------------------------- 0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969 @@ -331,6 +336,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 (10 rows) +SET enable_indexscan = ON; -- Test normal chunk append can be used in a parallel worker :PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10; QUERY PLAN diff --git a/test/expected/parallel-16.out b/test/expected/parallel-16.out index 34f0e848a3e..ba3bd81b657 100644 --- a/test/expected/parallel-16.out +++ b/test/expected/parallel-16.out @@ -286,37 +286,43 @@ SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; (1 row) RESET parallel_leader_participation; --- Test parallel chunk append is used +-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append) SET parallel_tuple_cost = 0; -:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- - Sort - Sort Key: test.i, _hyper_1_1_chunk.i - -> Hash Right Join - Hash Cond: (_hyper_1_1_chunk.i = test.i) +SET enable_indexscan = OFF; +:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Incremental Sort + Sort Key: test.i, _hyper_1_1_chunk_1.i + Presorted Key: test.i + -> Merge Left Join + Merge Cond: (test.i = _hyper_1_1_chunk_1.i) -> Limit - -> Gather + -> Gather Merge Workers Planned: 2 - -> Parallel Seq Scan on _hyper_1_1_chunk - Filter: (i < 500000) - -> Hash - -> Limit - -> Gather - Workers Planned: 2 + -> Sort + Sort Key: test.i -> Result One-Time Filter: (length(version()) > 0) -> Parallel Custom Scan (ChunkAppend) on test Chunks excluded during startup: 0 -> Result One-Time Filter: (length(version()) > 0) - -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + -> Parallel Seq Scan on _hyper_1_1_chunk -> Result One-Time Filter: (length(version()) > 0) -> Parallel Seq Scan on _hyper_1_2_chunk -(23 rows) + -> Materialize + -> Limit + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: _hyper_1_1_chunk_1.i + -> Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1 + Filter: (i < 500000) +(28 rows) -SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; +SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; i | j | ts | i | j | ts ----+------+-----------------------------+----+------+----------------------------- 0 | 0.1 | Wed Dec 31 16:00:00 1969 | 0 | 0.1 | Wed Dec 31 16:00:00 1969 @@ -331,6 +337,7 @@ SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 90 | 90.1 | Wed Dec 31 16:00:00.09 
1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 (10 rows) +SET enable_indexscan = ON; -- Test normal chunk append can be used in a parallel worker :PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10; QUERY PLAN diff --git a/test/sql/parallel.sql.in b/test/sql/parallel.sql.in index 14ea2135fbd..20d7de602f1 100644 --- a/test/sql/parallel.sql.in +++ b/test/sql/parallel.sql.in @@ -77,10 +77,12 @@ SET parallel_leader_participation = off; SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0; RESET parallel_leader_participation; --- Test parallel chunk append is used +-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append) SET parallel_tuple_cost = 0; -:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; -SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; +SET enable_indexscan = OFF; +:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; +SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i; +SET enable_indexscan = ON; -- Test normal chunk append can be used in a parallel worker :PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10; From 34d72aa29874ae06cd5949762acdd941a0c3fd07 Mon Sep 17 00:00:00 2001 From: Ante Kresic Date: Wed, 22 Nov 2023 16:09:29 +0100 Subject: [PATCH 3/6] Use segmentwise recompression in compress policy Change compression policy to use segmentwise recompression when possible to increase performance. Segmentwise recompression decompresses rows into memory, thus reducing IO load when recompressing, making it much faster for bigger chunks. 
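For illustration only (not part of this patch), the decision the updated policy makes can be exercised by hand against a partially compressed chunk; the chunk name below is a placeholder:

    -- returns a usable index if segmentwise recompression is possible
    SELECT _timescaledb_functions.get_compressed_chunk_index_for_recompression(
           '_timescaledb_internal._hyper_1_1_chunk'::regclass);
    -- if an index is returned, the policy recompresses the changed segments in place
    SELECT _timescaledb_functions.recompress_chunk_segmentwise(
           '_timescaledb_internal._hyper_1_1_chunk'::regclass);
    -- otherwise it falls back to a full decompress/compress cycle
    SELECT decompress_chunk('_timescaledb_internal._hyper_1_1_chunk', if_compressed => true);
    SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk');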
--- .unreleased/fix_6343 | 2 ++ sql/policy_internal.sql | 25 ++++++---------- tsl/test/expected/bgw_custom-13.out | 2 +- tsl/test/expected/bgw_custom-14.out | 2 +- tsl/test/expected/bgw_custom-15.out | 2 +- tsl/test/expected/bgw_custom-16.out | 2 +- tsl/test/expected/compression_bgw-13.out | 35 +++++++++++++---------- tsl/test/expected/compression_bgw-14.out | 35 +++++++++++++---------- tsl/test/expected/compression_bgw-15.out | 35 +++++++++++++---------- tsl/test/expected/compression_bgw-16.out | 35 +++++++++++++---------- tsl/test/sql/include/recompress_basic.sql | 11 +++++-- 11 files changed, 103 insertions(+), 83 deletions(-) create mode 100644 .unreleased/fix_6343 diff --git a/.unreleased/fix_6343 b/.unreleased/fix_6343 new file mode 100644 index 00000000000..98aabc3ce91 --- /dev/null +++ b/.unreleased/fix_6343 @@ -0,0 +1,2 @@ +Fixes: #6343 Enable segmentwise recompression in compression policy +Thanks: @fetchezar for reporting the issue diff --git a/sql/policy_internal.sql b/sql/policy_internal.sql index 2ea47567b39..4e786fbd041 100644 --- a/sql/policy_internal.sql +++ b/sql/policy_internal.sql @@ -123,26 +123,19 @@ BEGIN ) ) AND recompress_enabled IS TRUE THEN BEGIN - PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true); + -- first check if there's an index. Might have to use a heuristic to determine if index usage would be efficient, + -- or if we'd better fall back to decompressing & recompressing entire chunk + IF _timescaledb_functions.get_compressed_chunk_index_for_recompression(chunk_rec.oid) IS NOT NULL THEN + PERFORM _timescaledb_functions.recompress_chunk_segmentwise(chunk_rec.oid); + ELSE + PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true); + PERFORM @extschema@.compress_chunk(chunk_rec.oid); + END IF; EXCEPTION WHEN OTHERS THEN GET STACKED DIAGNOSTICS _message = MESSAGE_TEXT, _detail = PG_EXCEPTION_DETAIL; - RAISE WARNING 'decompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text - USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), - ERRCODE = sqlstate; - END; - -- SET LOCAL is only active until end of transaction. 
- -- While we could use SET at the start of the function we do not - -- want to bleed out search_path to caller, so we do SET LOCAL - -- again after COMMIT - BEGIN - PERFORM @extschema@.compress_chunk(chunk_rec.oid); - EXCEPTION WHEN OTHERS THEN - GET STACKED DIAGNOSTICS - _message = MESSAGE_TEXT, - _detail = PG_EXCEPTION_DETAIL; - RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text + RAISE WARNING 'recompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), ERRCODE = sqlstate; END; diff --git a/tsl/test/expected/bgw_custom-13.out b/tsl/test/expected/bgw_custom-13.out index 22f691e4669..8171f5be894 100644 --- a/tsl/test/expected/bgw_custom-13.out +++ b/tsl/test/expected/bgw_custom-13.out @@ -572,7 +572,7 @@ FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; --verify that job is dropped when cagg is dropped DROP MATERIALIZED VIEW conditions_summary_daily; -NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_7_chunk SELECT id, proc_name, hypertable_id FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; id | proc_name | hypertable_id diff --git a/tsl/test/expected/bgw_custom-14.out b/tsl/test/expected/bgw_custom-14.out index 6f308e7d361..8795a704a6f 100644 --- a/tsl/test/expected/bgw_custom-14.out +++ b/tsl/test/expected/bgw_custom-14.out @@ -572,7 +572,7 @@ FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; --verify that job is dropped when cagg is dropped DROP MATERIALIZED VIEW conditions_summary_daily; -NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_7_chunk SELECT id, proc_name, hypertable_id FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; id | proc_name | hypertable_id diff --git a/tsl/test/expected/bgw_custom-15.out b/tsl/test/expected/bgw_custom-15.out index 6f308e7d361..8795a704a6f 100644 --- a/tsl/test/expected/bgw_custom-15.out +++ b/tsl/test/expected/bgw_custom-15.out @@ -572,7 +572,7 @@ FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; --verify that job is dropped when cagg is dropped DROP MATERIALIZED VIEW conditions_summary_daily; -NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_7_chunk SELECT id, proc_name, hypertable_id FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; id | proc_name | hypertable_id diff --git a/tsl/test/expected/bgw_custom-16.out b/tsl/test/expected/bgw_custom-16.out index 6f308e7d361..8795a704a6f 100644 --- a/tsl/test/expected/bgw_custom-16.out +++ b/tsl/test/expected/bgw_custom-16.out @@ -572,7 +572,7 @@ FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; --verify that job is dropped when cagg is dropped DROP MATERIALIZED VIEW conditions_summary_daily; -NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_7_chunk SELECT id, proc_name, hypertable_id FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; id | proc_name | hypertable_id diff --git a/tsl/test/expected/compression_bgw-13.out b/tsl/test/expected/compression_bgw-13.out index 74818f05ac4..9b275da800c 100644 --- a/tsl/test/expected/compression_bgw-13.out +++ b/tsl/test/expected/compression_bgw-13.out @@ -474,15 +474,16 @@ ORDER BY 1, 2; Thu Apr 02 17:00:00 2020 PDT | 13 | 
1 (4 rows) ---chunk status should be unordered for the previously compressed chunk +--chunk status should be partially compressed for the previously compressed chunk SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 9 | _hyper_14_62_chunk - 0 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 9 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 0 | _hyper_14_64_chunk | (2 rows) SELECT add_compression_policy AS job_id @@ -490,23 +491,27 @@ SELECT add_compression_policy AS job_id CALL run_job(:job_id); CALL run_job(:job_id); -- status should be compressed --- +-- compressed chunk name should not change for +-- the partially compressed chunk indicating +-- it was done segmentwise SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 1 | _hyper_14_62_chunk - 1 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 1 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 1 | _hyper_14_64_chunk | compress_hyper_15_65_chunk (2 rows) \set ON_ERROR_STOP 0 -- call recompress_chunk when status is not unordered CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); -psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:115: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" -- This will succeed and compress the chunk for the test below. 
CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); -psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:118: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" --now decompress it , then try and recompress SELECT decompress_chunk(:'CHUNK_NAME'::regclass); decompress_chunk @@ -515,7 +520,7 @@ SELECT decompress_chunk(:'CHUNK_NAME'::regclass); (1 row) CALL recompress_chunk(:'CHUNK_NAME'::regclass); -psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +psql:include/recompress_basic.sql:122: ERROR: call compress_chunk instead of recompress_chunk \set ON_ERROR_STOP 1 -- test recompress policy CREATE TABLE metrics(time timestamptz NOT NULL); @@ -620,7 +625,7 @@ SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'met ---- nothing to do yet CALL run_job(:JOB_RECOMPRESS); -psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +psql:include/recompress_basic.sql:194: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy ---- status should be 1 SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; chunk_status diff --git a/tsl/test/expected/compression_bgw-14.out b/tsl/test/expected/compression_bgw-14.out index ceea22edd46..fe7271683e9 100644 --- a/tsl/test/expected/compression_bgw-14.out +++ b/tsl/test/expected/compression_bgw-14.out @@ -474,15 +474,16 @@ ORDER BY 1, 2; Thu Apr 02 17:00:00 2020 PDT | 13 | 1 (4 rows) ---chunk status should be unordered for the previously compressed chunk +--chunk status should be partially compressed for the previously compressed chunk SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 9 | _hyper_14_62_chunk - 0 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 9 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 0 | _hyper_14_64_chunk | (2 rows) SELECT add_compression_policy AS job_id @@ -490,23 +491,27 @@ SELECT add_compression_policy AS job_id CALL run_job(:job_id); CALL run_job(:job_id); -- status should be compressed --- +-- compressed chunk name should not change for +-- the partially compressed chunk indicating +-- it was done segmentwise SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 1 | _hyper_14_62_chunk - 1 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 1 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 1 | _hyper_14_64_chunk | compress_hyper_15_65_chunk (2 rows) \set ON_ERROR_STOP 0 -- call recompress_chunk when status is not unordered CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); -psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:115: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" -- This will succeed and compress 
the chunk for the test below. CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); -psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:118: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" --now decompress it , then try and recompress SELECT decompress_chunk(:'CHUNK_NAME'::regclass); decompress_chunk @@ -515,7 +520,7 @@ SELECT decompress_chunk(:'CHUNK_NAME'::regclass); (1 row) CALL recompress_chunk(:'CHUNK_NAME'::regclass); -psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +psql:include/recompress_basic.sql:122: ERROR: call compress_chunk instead of recompress_chunk \set ON_ERROR_STOP 1 -- test recompress policy CREATE TABLE metrics(time timestamptz NOT NULL); @@ -620,7 +625,7 @@ SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'met ---- nothing to do yet CALL run_job(:JOB_RECOMPRESS); -psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +psql:include/recompress_basic.sql:194: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy ---- status should be 1 SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; chunk_status diff --git a/tsl/test/expected/compression_bgw-15.out b/tsl/test/expected/compression_bgw-15.out index ceea22edd46..fe7271683e9 100644 --- a/tsl/test/expected/compression_bgw-15.out +++ b/tsl/test/expected/compression_bgw-15.out @@ -474,15 +474,16 @@ ORDER BY 1, 2; Thu Apr 02 17:00:00 2020 PDT | 13 | 1 (4 rows) ---chunk status should be unordered for the previously compressed chunk +--chunk status should be partially compressed for the previously compressed chunk SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 9 | _hyper_14_62_chunk - 0 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 9 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 0 | _hyper_14_64_chunk | (2 rows) SELECT add_compression_policy AS job_id @@ -490,23 +491,27 @@ SELECT add_compression_policy AS job_id CALL run_job(:job_id); CALL run_job(:job_id); -- status should be compressed --- +-- compressed chunk name should not change for +-- the partially compressed chunk indicating +-- it was done segmentwise SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 1 | _hyper_14_62_chunk - 1 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 1 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 1 | _hyper_14_64_chunk | compress_hyper_15_65_chunk (2 rows) \set ON_ERROR_STOP 0 -- call recompress_chunk when status is not unordered CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); -psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:115: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" -- 
This will succeed and compress the chunk for the test below. CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); -psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:118: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" --now decompress it , then try and recompress SELECT decompress_chunk(:'CHUNK_NAME'::regclass); decompress_chunk @@ -515,7 +520,7 @@ SELECT decompress_chunk(:'CHUNK_NAME'::regclass); (1 row) CALL recompress_chunk(:'CHUNK_NAME'::regclass); -psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +psql:include/recompress_basic.sql:122: ERROR: call compress_chunk instead of recompress_chunk \set ON_ERROR_STOP 1 -- test recompress policy CREATE TABLE metrics(time timestamptz NOT NULL); @@ -620,7 +625,7 @@ SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'met ---- nothing to do yet CALL run_job(:JOB_RECOMPRESS); -psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +psql:include/recompress_basic.sql:194: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy ---- status should be 1 SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; chunk_status diff --git a/tsl/test/expected/compression_bgw-16.out b/tsl/test/expected/compression_bgw-16.out index ceea22edd46..fe7271683e9 100644 --- a/tsl/test/expected/compression_bgw-16.out +++ b/tsl/test/expected/compression_bgw-16.out @@ -474,15 +474,16 @@ ORDER BY 1, 2; Thu Apr 02 17:00:00 2020 PDT | 13 | 1 (4 rows) ---chunk status should be unordered for the previously compressed chunk +--chunk status should be partially compressed for the previously compressed chunk SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 9 | _hyper_14_62_chunk - 0 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 9 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 0 | _hyper_14_64_chunk | (2 rows) SELECT add_compression_policy AS job_id @@ -490,23 +491,27 @@ SELECT add_compression_policy AS job_id CALL run_job(:job_id); CALL run_job(:job_id); -- status should be compressed --- +-- compressed chunk name should not change for +-- the partially compressed chunk indicating +-- it was done segmentwise SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; - chunk_status | CHUNK_NAME ---------------+-------------------- - 1 | _hyper_14_62_chunk - 1 | _hyper_14_64_chunk + chunk_status | CHUNK_NAME | COMPRESSED_CHUNK_NAME +--------------+--------------------+---------------------------- + 1 | _hyper_14_62_chunk | compress_hyper_15_63_chunk + 1 | _hyper_14_64_chunk | compress_hyper_15_65_chunk (2 rows) \set ON_ERROR_STOP 0 -- call recompress_chunk when status is not unordered CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); -psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:115: NOTICE: nothing to recompress in 
chunk "_hyper_14_62_chunk" -- This will succeed and compress the chunk for the test below. CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); -psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +psql:include/recompress_basic.sql:118: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" --now decompress it , then try and recompress SELECT decompress_chunk(:'CHUNK_NAME'::regclass); decompress_chunk @@ -515,7 +520,7 @@ SELECT decompress_chunk(:'CHUNK_NAME'::regclass); (1 row) CALL recompress_chunk(:'CHUNK_NAME'::regclass); -psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +psql:include/recompress_basic.sql:122: ERROR: call compress_chunk instead of recompress_chunk \set ON_ERROR_STOP 1 -- test recompress policy CREATE TABLE metrics(time timestamptz NOT NULL); @@ -620,7 +625,7 @@ SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'met ---- nothing to do yet CALL run_job(:JOB_RECOMPRESS); -psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +psql:include/recompress_basic.sql:194: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy ---- status should be 1 SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; chunk_status diff --git a/tsl/test/sql/include/recompress_basic.sql b/tsl/test/sql/include/recompress_basic.sql index eeba8f233fa..ccde18b5721 100644 --- a/tsl/test/sql/include/recompress_basic.sql +++ b/tsl/test/sql/include/recompress_basic.sql @@ -88,9 +88,10 @@ FROM test2 GROUP BY time_bucket(INTERVAL '2 hour', timec), b ORDER BY 1, 2; ---chunk status should be unordered for the previously compressed chunk +--chunk status should be partially compressed for the previously compressed chunk SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; @@ -100,8 +101,12 @@ CALL run_job(:job_id); CALL run_job(:job_id); -- status should be compressed --- +-- compressed chunk name should not change for +-- the partially compressed chunk indicating +-- it was done segmentwise SELECT chunk_status, - chunk_name as "CHUNK_NAME" + chunk_name as "CHUNK_NAME", + compressed_chunk_name as "COMPRESSED_CHUNK_NAME" FROM compressed_chunk_info_view WHERE hypertable_name = 'test2' ORDER BY chunk_name; From d896a9eefdcd1855af2e6ba2bc7f79af1c396142 Mon Sep 17 00:00:00 2001 From: Nikhil Sontakke Date: Wed, 22 Nov 2023 15:38:12 +0530 Subject: [PATCH 4/6] Fix non-default tablespaces with constraints If a hypertable uses a non-default tablespace for its primary or unique constraints with additional DEFERRABLE or INITIALLY DEFERRED characteristics then any chunk creation will fail with syntax error. We now set the tablespace via a separate command for such constraints for the chunks. 
Fixes #6338 --- .unreleased/fix_6339 | 2 ++ sql/chunk_constraint.sql | 16 ++++++--- sql/updates/reverse-dev.sql | 69 ++++++++++++++++++++++++++++++++++++ test/expected/constraint.out | 2 +- test/sql/constraint.sql | 2 +- 5 files changed, 85 insertions(+), 6 deletions(-) create mode 100644 .unreleased/fix_6339 diff --git a/.unreleased/fix_6339 b/.unreleased/fix_6339 new file mode 100644 index 00000000000..b4f4290cb34 --- /dev/null +++ b/.unreleased/fix_6339 @@ -0,0 +1,2 @@ +Fixes: #6339 Fix tablespace with constraints +Thanks: @lyp-bobi for reporting the issue diff --git a/sql/chunk_constraint.sql b/sql/chunk_constraint.sql index 0b0a815870d..1d205b236fe 100644 --- a/sql/chunk_constraint.sql +++ b/sql/chunk_constraint.sql @@ -40,10 +40,6 @@ BEGIN def := pg_get_constraintdef(constraint_oid); - IF indx_tablespace IS NOT NULL THEN - def := format('%s USING INDEX TABLESPACE %I', def, indx_tablespace); - END IF; - ELSIF constraint_type = 't' THEN -- constraint triggers are copied separately with normal triggers def := NULL; @@ -63,6 +59,18 @@ BEGIN $$ ALTER TABLE %I.%I ADD CONSTRAINT %I %s $$, chunk_row.schema_name, chunk_row.table_name, chunk_constraint_row.constraint_name, def ); + + -- if constraint (primary or unique) needs a tablespace then add it + -- via a separate ALTER INDEX SET TABLESPACE command. We cannot append it + -- to the "def" string above since it leads to a SYNTAX error when + -- "DEFERRABLE" or "INITIALLY DEFERRED" are used in the constraint + IF indx_tablespace IS NOT NULL THEN + EXECUTE pg_catalog.format( + $$ ALTER INDEX %I.%I SET TABLESPACE %I $$, + chunk_row.schema_name, chunk_constraint_row.constraint_name, indx_tablespace + ); + END IF; + END IF; END $BODY$ SET search_path TO pg_catalog, pg_temp; diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 7f33372ef99..123d39f252c 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -333,3 +333,72 @@ BEGIN END LOOP; END; $$ LANGUAGE PLPGSQL; + +DROP FUNCTION _timescaledb_functions.chunk_constraint_add_table_constraint( + chunk_constraint_row _timescaledb_catalog.chunk_constraint +); + +CREATE FUNCTION _timescaledb_functions.chunk_constraint_add_table_constraint( + chunk_constraint_row _timescaledb_catalog.chunk_constraint +) + RETURNS VOID LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + chunk_row _timescaledb_catalog.chunk; + hypertable_row _timescaledb_catalog.hypertable; + constraint_oid OID; + constraint_type CHAR; + check_sql TEXT; + def TEXT; + indx_tablespace NAME; + tablespace_def TEXT; +BEGIN + SELECT * INTO STRICT chunk_row FROM _timescaledb_catalog.chunk c WHERE c.id = chunk_constraint_row.chunk_id; + SELECT * INTO STRICT hypertable_row FROM _timescaledb_catalog.hypertable h WHERE h.id = chunk_row.hypertable_id; + + IF chunk_constraint_row.dimension_slice_id IS NOT NULL THEN + RAISE 'cannot create dimension constraint %', chunk_constraint_row; + ELSIF chunk_constraint_row.hypertable_constraint_name IS NOT NULL THEN + + SELECT oid, contype INTO STRICT constraint_oid, constraint_type FROM pg_constraint + WHERE conname=chunk_constraint_row.hypertable_constraint_name AND + conrelid = format('%I.%I', hypertable_row.schema_name, hypertable_row.table_name)::regclass::oid; + + IF constraint_type IN ('p','u') THEN + -- since primary keys and unique constraints are backed by an index + -- they might have an index tablespace assigned + -- the tablspace is not part of the constraint definition so + -- we have to append it explicitly to preserve it + SELECT T.spcname INTO indx_tablespace + 
FROM pg_constraint C, pg_class I, pg_tablespace T + WHERE C.oid = constraint_oid AND C.contype IN ('p', 'u') AND I.oid = C.conindid AND I.reltablespace = T.oid; + + def := pg_get_constraintdef(constraint_oid); + + IF indx_tablespace IS NOT NULL THEN + def := format('%s USING INDEX TABLESPACE %I', def, indx_tablespace); + END IF; + + ELSIF constraint_type = 't' THEN + -- constraint triggers are copied separately with normal triggers + def := NULL; + ELSE + def := pg_get_constraintdef(constraint_oid); + END IF; + + ELSE + RAISE 'unknown constraint type'; + END IF; + + IF def IS NOT NULL THEN + -- to allow for custom types with operators outside of pg_catalog + -- we set search_path to @extschema@ + SET LOCAL search_path TO @extschema@, pg_temp; + EXECUTE pg_catalog.format( + $$ ALTER TABLE %I.%I ADD CONSTRAINT %I %s $$, + chunk_row.schema_name, chunk_row.table_name, chunk_constraint_row.constraint_name, def + ); + + END IF; +END +$BODY$ SET search_path TO pg_catalog, pg_temp; diff --git a/test/expected/constraint.out b/test/expected/constraint.out index ec7e953f7aa..b22baab9360 100644 --- a/test/expected/constraint.out +++ b/test/expected/constraint.out @@ -812,7 +812,7 @@ CREATE TABLE tbl ( fk_id int, id int, time timestamp, -CONSTRAINT pk PRIMARY KEY (time, id) USING INDEX TABLESPACE tablespace1); +CONSTRAINT pk PRIMARY KEY (time, id) USING INDEX TABLESPACE tablespace1 DEFERRABLE INITIALLY DEFERRED); SELECT create_hypertable('tbl', 'time'); WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_hypertable diff --git a/test/sql/constraint.sql b/test/sql/constraint.sql index 1603422f0b2..9861b798e9c 100644 --- a/test/sql/constraint.sql +++ b/test/sql/constraint.sql @@ -612,7 +612,7 @@ CREATE TABLE tbl ( fk_id int, id int, time timestamp, -CONSTRAINT pk PRIMARY KEY (time, id) USING INDEX TABLESPACE tablespace1); +CONSTRAINT pk PRIMARY KEY (time, id) USING INDEX TABLESPACE tablespace1 DEFERRABLE INITIALLY DEFERRED); SELECT create_hypertable('tbl', 'time'); From fb32fe817b7c0464d1a82cd7f9598938a73f3cdb Mon Sep 17 00:00:00 2001 From: Ante Kresic Date: Fri, 24 Nov 2023 09:53:21 +0100 Subject: [PATCH 5/6] Reset compressor between recompression runs If we reuse the compressor to recompress multiple sets of tuples, internal state gets left behind from the previous run which can contain invalid data. Resetting the compressor first iteration field between runs fixes this. 
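A sketch of a scenario that exercises this path (illustrative only; the hypertable, its "device" segmentby column, and the inserted values are assumptions, not taken from this patch). Inserting into several segments of an already compressed chunk makes one segmentwise recompression run feed multiple tuple sets to the same compressor:

    CREATE TABLE metrics(time timestamptz NOT NULL, device int);
    SELECT create_hypertable('metrics', 'time');
    ALTER TABLE metrics SET (timescaledb.compress,
                             timescaledb.compress_segmentby = 'device');
    INSERT INTO metrics VALUES ('2023-01-01', 1), ('2023-01-01', 2);
    SELECT compress_chunk(c) FROM show_chunks('metrics') c;
    -- new rows land in two different segments of the compressed chunk
    INSERT INTO metrics VALUES ('2023-01-01', 1), ('2023-01-01', 2);
    -- one run now recompresses both segments; the compressor is reset before
    -- each tuple set so state from the previous segment cannot leak over
    SELECT _timescaledb_functions.recompress_chunk_segmentwise(c)
      FROM show_chunks('metrics') c;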
--- tsl/src/compression/api.c | 3 +++ tsl/src/compression/compression.c | 6 ++++++ tsl/src/compression/compression.h | 1 + 3 files changed, 10 insertions(+) diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c index 4bb53a9c608..b84550eb416 100644 --- a/tsl/src/compression/api.c +++ b/tsl/src/compression/api.c @@ -1500,6 +1500,7 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS) tuplesort_performsort(segment_tuplesortstate); + row_compressor_reset(&row_compressor); recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor); /* now any pointers returned will be garbage */ @@ -1556,6 +1557,7 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS) uncompressed_chunk_rel, current_segment); tuplesort_performsort(segment_tuplesortstate); + row_compressor_reset(&row_compressor); recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor); tuplesort_end(segment_tuplesortstate); @@ -1582,6 +1584,7 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS) if (unmatched_rows_exist) { tuplesort_performsort(segment_tuplesortstate); + row_compressor_reset(&row_compressor); row_compressor_append_sorted_rows(&row_compressor, segment_tuplesortstate, RelationGetDescr(uncompressed_chunk_rel)); diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c index f6659faf5c5..b87e7f030f5 100644 --- a/tsl/src/compression/compression.c +++ b/tsl/src/compression/compression.c @@ -1292,6 +1292,12 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change MemoryContextReset(row_compressor->per_row_ctx); } +void +row_compressor_reset(RowCompressor *row_compressor) +{ + row_compressor->first_iteration = true; +} + void row_compressor_finish(RowCompressor *row_compressor) { diff --git a/tsl/src/compression/compression.h b/tsl/src/compression/compression.h index db72789f9b7..bc871e01340 100644 --- a/tsl/src/compression/compression.h +++ b/tsl/src/compression/compression.h @@ -359,6 +359,7 @@ extern void row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompr const ColumnCompressionInfo **column_compression_info, int16 *column_offsets, int16 num_columns_in_compressed_table, bool need_bistate, bool reset_sequence, int insert_options); +extern void row_compressor_reset(RowCompressor *row_compressor); extern void row_compressor_finish(RowCompressor *row_compressor); extern void row_compressor_append_sorted_rows(RowCompressor *row_compressor, Tuplesortstate *sorted_rel, TupleDesc sorted_desc); From 2539ea237a956603a838aef71fa4455badacfdff Mon Sep 17 00:00:00 2001 From: Jan Nidzwetzki Date: Tue, 21 Nov 2023 17:25:24 +0100 Subject: [PATCH 6/6] Release 2.13.0 This release contains performance improvements, an improved hypertable DDL API and bug fixes since the 2.12.2 release. We recommend that you upgrade at the next available opportunity. In addition, it includes these noteworthy features: * Full PostgreSQL 16 support for all existing features * Vectorized aggregation execution for sum() * Track chunk creation time used in retention/compression policies **Deprecation notice: Multi-node support** TimescaleDB 2.13 is the last version that will include multi-node support. Multi-node support in 2.13 is available for PostgreSQL 13, 14 and 15. Learn more about it [here](docs/MultiNodeDeprecation.md). If you want to migrate from multi-node TimescaleDB to single-node TimescaleDB read the [migration documentation](https://docs.timescale.com/migrate/latest/multi-node-to-timescale-service/). 
**PostgreSQL 13 deprecation announcement** We will continue supporting PostgreSQL 13 until April 2024. Closer to that time, we will announce the specific version of TimescaleDB in which PostgreSQL 13 support will not be included going forward. **Starting from TimescaleDB 2.13.0** * No Amazon Machine Images (AMI) are published. If you previously used AMI, please use another [installation method](https://docs.timescale.com/self-hosted/latest/install/) * Continuous Aggregates are materialized only (non-realtime) by default **Features** * #5575 Add chunk-wise sorted paths for compressed chunks * #5761 Simplify hypertable DDL API * #5890 Reduce WAL activity by freezing compressed tuples immediately * #6050 Vectorized aggregation execution for sum() * #6062 Add metadata for chunk creation time * #6077 Make Continuous Aggregates materialized only (non-realtime) by default * #6177 Change show_chunks/drop_chunks using chunk creation time * #6178 Show batches/tuples decompressed during DML operations in EXPLAIN output * #6185 Keep track of catalog version * #6227 Use creation time in retention/compression policy * #6307 Add SQL function cagg_validate_query **Bugfixes** * #6188 Add GUC for setting background worker log level * #6222 Allow enabling compression on hypertable with unique expression index * #6240 Check if worker registration succeeded * #6254 Fix exception detail passing in compression_policy_execute * #6264 Fix missing bms_del_member result assignment * #6275 Fix negative bitmapset member not allowed in compression * #6280 Potential data loss when compressing a table with a partial index that matches compression order. * #6289 Add support for startup chunk exclusion with aggs * #6290 Repair relacl on upgrade * #6297 Fix segfault when creating a cagg using a NULL width in time bucket function * #6305 Make timescaledb_functions.makeaclitem strict * #6332 Fix typmod and collation for segmentby columns * #6339 Fix tablespace with constraints * #6343 Enable segmentwise recompression in compression policy **Thanks** * @fetchezar for reporting an issue with compression policy error messages * @jflambert for reporting the background worker log level issue * @torazem for reporting an issue with compression and large oids * @fetchezar for reporting an issue in the compression policy * @lyp-bobi for reporting an issue with tablespace with constraints * @pdipesh02 for contributing to the implementation of the metadata for chunk creation time, the generalized hypertable API, and show_chunks/drop_chunks using chunk creation time * @lkshminarayanan for all his work on PG16 support --- .unreleased/PR_6222 | 1 - .unreleased/enhancement_6049 | 1 - .unreleased/feature_5575 | 1 - .unreleased/feature_5761 | 2 - .unreleased/feature_5890 | 1 - .unreleased/feature_6050 | 1 - .unreleased/feature_6062 | 2 - .unreleased/feature_6077 | 1 - .unreleased/feature_6177 | 2 - .unreleased/feature_6227 | 1 - .unreleased/feature_6307 | 1 - .unreleased/fix_6188 | 2 - .unreleased/fix_6240 | 1 - .unreleased/fix_6289 | 1 - .unreleased/fix_6290 | 1 - .unreleased/fix_6305 | 1 - .unreleased/fix_6339 | 2 - .unreleased/fix_6343 | 2 - .unreleased/fix_partial_index | 1 - .unreleased/pr_6178 | 1 - .unreleased/pr_6185 | 1 - .unreleased/pr_6254 | 3 - .unreleased/pr_6264 | 1 - .unreleased/pr_6275 | 3 - .unreleased/pr_6297 | 1 - .unreleased/pr_6332 | 1 - CHANGELOG.md | 68 +++++ docs/MultiNodeDeprecation.md | 42 ++++ sql/CMakeLists.txt | 8 +- sql/updates/2.12.2--2.13.0.sql | 444 +++++++++++++++++++++++++++++++++ 
sql/updates/2.13.0--2.12.2.sql | 404 ++++++++++++++++++++++++++++++ sql/updates/latest-dev.sql | 444 --------------------------------- sql/updates/reverse-dev.sql | 403 ------------------------------ version.config | 2 +- 34 files changed, 964 insertions(+), 887 deletions(-) delete mode 100644 .unreleased/PR_6222 delete mode 100644 .unreleased/enhancement_6049 delete mode 100644 .unreleased/feature_5575 delete mode 100644 .unreleased/feature_5761 delete mode 100644 .unreleased/feature_5890 delete mode 100644 .unreleased/feature_6050 delete mode 100644 .unreleased/feature_6062 delete mode 100644 .unreleased/feature_6077 delete mode 100644 .unreleased/feature_6177 delete mode 100644 .unreleased/feature_6227 delete mode 100644 .unreleased/feature_6307 delete mode 100644 .unreleased/fix_6188 delete mode 100644 .unreleased/fix_6240 delete mode 100644 .unreleased/fix_6289 delete mode 100644 .unreleased/fix_6290 delete mode 100644 .unreleased/fix_6305 delete mode 100644 .unreleased/fix_6339 delete mode 100644 .unreleased/fix_6343 delete mode 100644 .unreleased/fix_partial_index delete mode 100644 .unreleased/pr_6178 delete mode 100644 .unreleased/pr_6185 delete mode 100644 .unreleased/pr_6254 delete mode 100644 .unreleased/pr_6264 delete mode 100644 .unreleased/pr_6275 delete mode 100644 .unreleased/pr_6297 delete mode 100644 .unreleased/pr_6332 create mode 100644 docs/MultiNodeDeprecation.md create mode 100644 sql/updates/2.12.2--2.13.0.sql create mode 100644 sql/updates/2.13.0--2.12.2.sql diff --git a/.unreleased/PR_6222 b/.unreleased/PR_6222 deleted file mode 100644 index bee0aeb86ae..00000000000 --- a/.unreleased/PR_6222 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6222 Allow enabling compression on hypertable with unique expression index diff --git a/.unreleased/enhancement_6049 b/.unreleased/enhancement_6049 deleted file mode 100644 index 6b77fe68b1f..00000000000 --- a/.unreleased/enhancement_6049 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6130 Add CI check for incorrect catalog updates diff --git a/.unreleased/feature_5575 b/.unreleased/feature_5575 deleted file mode 100644 index 5b573dfed4e..00000000000 --- a/.unreleased/feature_5575 +++ /dev/null @@ -1 +0,0 @@ -Implements: #5575 Add chunk-wise sorted paths for compressed chunks diff --git a/.unreleased/feature_5761 b/.unreleased/feature_5761 deleted file mode 100644 index 2d5196033f7..00000000000 --- a/.unreleased/feature_5761 +++ /dev/null @@ -1,2 +0,0 @@ -Implements: #5761 Simplify hypertable DDL API -Thanks: @pdipesh02 for contributing to the implementation of the generalized hypertable API diff --git a/.unreleased/feature_5890 b/.unreleased/feature_5890 deleted file mode 100644 index b8ac85886dc..00000000000 --- a/.unreleased/feature_5890 +++ /dev/null @@ -1 +0,0 @@ -Implements: #5890 Reduce WAL activity by freezing compressed tuples immediately diff --git a/.unreleased/feature_6050 b/.unreleased/feature_6050 deleted file mode 100644 index 60cbc49f6e5..00000000000 --- a/.unreleased/feature_6050 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6050 Vectorized aggregation execution for sum() diff --git a/.unreleased/feature_6062 b/.unreleased/feature_6062 deleted file mode 100644 index a0aa51e85b4..00000000000 --- a/.unreleased/feature_6062 +++ /dev/null @@ -1,2 +0,0 @@ -Implements: #6062 Add metadata for chunk creation time -Thanks: @pdipesh02 for contributing to the implementation of this feature diff --git a/.unreleased/feature_6077 b/.unreleased/feature_6077 deleted file mode 100644 index 34a4273a507..00000000000 --- a/.unreleased/feature_6077 +++ 
/dev/null @@ -1 +0,0 @@ -Implements: #6077 Make Continous Aggregates materialized only (non-realtime) by default diff --git a/.unreleased/feature_6177 b/.unreleased/feature_6177 deleted file mode 100644 index 79d6e4fc0dc..00000000000 --- a/.unreleased/feature_6177 +++ /dev/null @@ -1,2 +0,0 @@ -Implements: #6177 Change show_chunks/drop_chunks using chunk creation time -Thanks: @pdipesh02 for contributing to the implementation of this feature diff --git a/.unreleased/feature_6227 b/.unreleased/feature_6227 deleted file mode 100644 index 39ba8386bee..00000000000 --- a/.unreleased/feature_6227 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6227 Use creation time in retention/compression policy diff --git a/.unreleased/feature_6307 b/.unreleased/feature_6307 deleted file mode 100644 index 3d2381cfac1..00000000000 --- a/.unreleased/feature_6307 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6307 Add SQL function cagg_validate_query diff --git a/.unreleased/fix_6188 b/.unreleased/fix_6188 deleted file mode 100644 index 1a003a61e5b..00000000000 --- a/.unreleased/fix_6188 +++ /dev/null @@ -1,2 +0,0 @@ -Fixes: #6188 Add GUC for setting background worker log level -Thanks: @jflambert for reporting the issue diff --git a/.unreleased/fix_6240 b/.unreleased/fix_6240 deleted file mode 100644 index 2d9d2e4465f..00000000000 --- a/.unreleased/fix_6240 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6240 Check if worker registration succeeded diff --git a/.unreleased/fix_6289 b/.unreleased/fix_6289 deleted file mode 100644 index 6664512de6d..00000000000 --- a/.unreleased/fix_6289 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6289 Add support for startup chunk exclusion with aggs diff --git a/.unreleased/fix_6290 b/.unreleased/fix_6290 deleted file mode 100644 index a1ae57435cf..00000000000 --- a/.unreleased/fix_6290 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6290 Repair relacl on upgrade diff --git a/.unreleased/fix_6305 b/.unreleased/fix_6305 deleted file mode 100644 index 2cb7ae0804a..00000000000 --- a/.unreleased/fix_6305 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6305 Make _timescaledb_functions.makeaclitem strict diff --git a/.unreleased/fix_6339 b/.unreleased/fix_6339 deleted file mode 100644 index b4f4290cb34..00000000000 --- a/.unreleased/fix_6339 +++ /dev/null @@ -1,2 +0,0 @@ -Fixes: #6339 Fix tablespace with constraints -Thanks: @lyp-bobi for reporting the issue diff --git a/.unreleased/fix_6343 b/.unreleased/fix_6343 deleted file mode 100644 index 98aabc3ce91..00000000000 --- a/.unreleased/fix_6343 +++ /dev/null @@ -1,2 +0,0 @@ -Fixes: #6343 Enable segmentwise recompression in compression policy -Thanks: @fetchezar for reporting the issue diff --git a/.unreleased/fix_partial_index b/.unreleased/fix_partial_index deleted file mode 100644 index 5cb091f3338..00000000000 --- a/.unreleased/fix_partial_index +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6280 Potential data loss when compressing a table with a partial index that matches compression order. 
diff --git a/.unreleased/pr_6178 b/.unreleased/pr_6178 deleted file mode 100644 index 15e1037f6bf..00000000000 --- a/.unreleased/pr_6178 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6178 Show batches/tuples decompressed during DML operations in EXPLAIN output diff --git a/.unreleased/pr_6185 b/.unreleased/pr_6185 deleted file mode 100644 index 0f47652d542..00000000000 --- a/.unreleased/pr_6185 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6185 Keep track of catalog version diff --git a/.unreleased/pr_6254 b/.unreleased/pr_6254 deleted file mode 100644 index 98d634dc65a..00000000000 --- a/.unreleased/pr_6254 +++ /dev/null @@ -1,3 +0,0 @@ -Fixes: #6254 Fix exception detail passing in compression_policy_execute - -Thanks: @fetchezar for reporting an issue with compression policy error messages diff --git a/.unreleased/pr_6264 b/.unreleased/pr_6264 deleted file mode 100644 index 98c5408d1f4..00000000000 --- a/.unreleased/pr_6264 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6264 Fix missing bms_del_member result assignment diff --git a/.unreleased/pr_6275 b/.unreleased/pr_6275 deleted file mode 100644 index cf49ffeb62d..00000000000 --- a/.unreleased/pr_6275 +++ /dev/null @@ -1,3 +0,0 @@ -Fixes: #6275 Fix negative bitmapset member not allowed in compression - -Thanks: @torazem for reporting an issue with compression and large oids diff --git a/.unreleased/pr_6297 b/.unreleased/pr_6297 deleted file mode 100644 index 98027449fdf..00000000000 --- a/.unreleased/pr_6297 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6297 Fix segfault when creating a cagg using a NULL width in time bucket function diff --git a/.unreleased/pr_6332 b/.unreleased/pr_6332 deleted file mode 100644 index 69841904ddb..00000000000 --- a/.unreleased/pr_6332 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6332 Fix typmod and collation for segmentby columns diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f9a309cb7a..ebfadf35366 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,73 @@ `psql` with the `-X` flag to prevent any `.psqlrc` commands from accidentally triggering the load of a previous DB version.** +## 2.13.0 (2023-11-28) + +This release contains performance improvements, an improved hypertable DDL API +and bug fixes since the 2.12.2 release. We recommend that you upgrade at the next +available opportunity. + +In addition, it includes these noteworthy features: + +* Full PostgreSQL 16 support for all existing features +* Vectorized aggregation execution for sum() +* Track chunk creation time used in retention/compression policies + +**Deprecation notice: Multi-node support** +TimescaleDB 2.13 is the last version that will include multi-node support. Multi-node +support in 2.13 is available for PostgreSQL 13, 14 and 15. Learn more about it +[here](docs/MultiNodeDeprecation.md). + +If you want to migrate from multi-node TimescaleDB to single-node TimescaleDB, read the +[migration documentation](https://docs.timescale.com/migrate/latest/multi-node-to-timescale-service/). + +**PostgreSQL 13 deprecation announcement** +We will continue supporting PostgreSQL 13 until April 2024. Closer to that time, we will announce the specific version of TimescaleDB in which PostgreSQL 13 support will not be included going forward. + +**Starting from TimescaleDB 2.13.0** +* No Amazon Machine Images (AMI) are published.
If you previously used an AMI, please +use another [installation method](https://docs.timescale.com/self-hosted/latest/install/) +* Continuous Aggregates are materialized only (non-realtime) by default + +**Features** +* #5575 Add chunk-wise sorted paths for compressed chunks +* #5761 Simplify hypertable DDL API +* #5890 Reduce WAL activity by freezing compressed tuples immediately +* #6050 Vectorized aggregation execution for sum() +* #6062 Add metadata for chunk creation time +* #6077 Make Continuous Aggregates materialized only (non-realtime) by default +* #6177 Change show_chunks/drop_chunks using chunk creation time +* #6178 Show batches/tuples decompressed during DML operations in EXPLAIN output +* #6185 Keep track of catalog version +* #6227 Use creation time in retention/compression policy +* #6307 Add SQL function cagg_validate_query + +**Bugfixes** +* #6188 Add GUC for setting background worker log level +* #6222 Allow enabling compression on hypertable with unique expression index +* #6240 Check if worker registration succeeded +* #6254 Fix exception detail passing in compression_policy_execute +* #6264 Fix missing bms_del_member result assignment +* #6275 Fix negative bitmapset member not allowed in compression +* #6280 Potential data loss when compressing a table with a partial index that matches compression order. +* #6289 Add support for startup chunk exclusion with aggs +* #6290 Repair relacl on upgrade +* #6297 Fix segfault when creating a cagg using a NULL width in time bucket function +* #6305 Make _timescaledb_functions.makeaclitem strict +* #6332 Fix typmod and collation for segmentby columns +* #6339 Fix tablespace with constraints +* #6343 Enable segmentwise recompression in compression policy + +**Thanks** +* @fetchezar for reporting an issue with compression policy error messages +* @jflambert for reporting the background worker log level issue +* @torazem for reporting an issue with compression and large oids +* @fetchezar for reporting an issue in the compression policy +* @lyp-bobi for reporting an issue with tablespace with constraints +* @pdipesh02 for contributing to the implementation of the metadata for chunk creation time, + the generalized hypertable API, and show_chunks/drop_chunks using chunk creation time +* @lkshminarayanan for all his work on PG16 support + ## 2.12.2 (2023-10-19) This release contains bug fixes since the 2.12.1 release. @@ -24,6 +91,7 @@ We recommend that you upgrade at the next available opportunity. * #6117 Avoid decompressing batches using an empty slot * #6123 Fix concurrency errors in OSM API * #6142 do not throw an error when deprecation GUC cannot be read + +**Thanks** * @symbx for reporting a crash when selecting from empty hypertables diff --git a/docs/MultiNodeDeprecation.md b/docs/MultiNodeDeprecation.md new file mode 100644 index 00000000000..83e8b9a526a --- /dev/null +++ b/docs/MultiNodeDeprecation.md @@ -0,0 +1,42 @@ +## Multi-node Deprecation + +Multi-node support has been deprecated. +TimescaleDB 2.13 is the last version that will include multi-node support. Multi-node support in 2.13 is available for PostgreSQL 13, 14 and 15. +This decision was not made lightly, and we want to provide a clear understanding of the reasoning behind this change and the path forward. + +### Why we are ending multi-node support + +We began to work on multi-node support in 2018 and released the first version in 2020 to address the growing +demand for higher scalability in TimescaleDB deployments.
+The distributed architecture of multi-node allowed for horizontal scalability of writes and reads to go beyond what a single node could provide. + +While we have added many improvements since the initial launch, the architecture of multi-node came with a number of +inherent limitations and challenges that have limited its adoption. +Regrettably, only about 1% of current TimescaleDB deployments utilize multi-node, and the complexity involved in +maintaining this feature has become a significant obstacle. +It’s not an isolated feature that can be kept in the product with very little effort, since adding new features required +extra development and testing to ensure they also worked for multi-node. + +As we've evolved our single-node product and expanded our cloud offering to serve thousands of customers, +we've identified more efficient solutions to meet the scalability needs of our users. + +First, we’ve made, and will continue to make, big improvements in the write and read performance of single-node. +We’ve scaled a single-node deployment to process 2 million inserts per second and have seen performance improvements of 10x for common queries. +You can read a summary of the latest query performance improvements [here](https://www.timescale.com/blog/8-performance-improvements-in-recent-timescaledb-releases-for-faster-query-analytics/). + +And second, we are leveraging cloud technologies that have become very mature to provide higher scalability in a more accessible way. +For example, our cloud offering uses object storage to deliver virtually [infinite storage capacity at a very low cost](https://www.timescale.com/blog/scaling-postgresql-for-cheap-introducing-tiered-storage-in-timescale/). + +For those reasons, we’ve decided to focus our efforts on improving single-node and leveraging cloud technologies to solve for high scalability, and as a result we’ve ended support for multi-node. + +### What this means for you + +We understand that this change may raise questions, and we are committed to supporting you through the transition. + +For current TimescaleDB multi-node users, please refer to our [migration documentation](https://docs.timescale.com/migrate/latest/multi-node-to-timescale-service/) +for a step-by-step guide to transition to a single-node configuration. + +Alternatively, you can continue to use multi-node up to version 2.13. However, please be aware that future versions will no longer include this functionality. + +If you have any questions or feedback, you can share them in the #multi-node channel in our [community Slack](https://slack.timescale.com/). + diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index e6ec1121719..1fa77f3149c 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -40,11 +40,12 @@ set(MOD_FILES updates/2.11.1--2.11.2.sql updates/2.11.2--2.12.0.sql updates/2.12.0--2.12.1.sql - updates/2.12.1--2.12.2.sql) + updates/2.12.1--2.12.2.sql + updates/2.12.2--2.13.0.sql) # The downgrade file to generate a downgrade script for the current version, as # specified in version.config -set(CURRENT_REV_FILE reverse-dev.sql) +set(CURRENT_REV_FILE 2.13.0--2.12.2.sql) # Files for generating old downgrade scripts. This should only include files for # downgrade from one version to its previous version since we do not support # skipping versions when downgrading.
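The CMake change above appends the new update script to `MOD_FILES` and points `CURRENT_REV_FILE` at the matching downgrade script, so `2.12.2--2.13.0.sql` and `2.13.0--2.12.2.sql` are what actually run when an installed extension moves between these two versions. Roughly, as a sketch (the exact chain depends on the version you start from, since versions cannot be skipped when downgrading):

ALTER EXTENSION timescaledb UPDATE TO '2.13.0'; -- applies updates/2.12.2--2.13.0.sql when coming from 2.12.2
ALTER EXTENSION timescaledb UPDATE TO '2.12.2'; -- applies the generated downgrade script 2.13.0--2.12.2.sql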
@@ -76,7 +77,8 @@ set(OLD_REV_FILES 2.11.2--2.11.1.sql 2.12.0--2.11.2.sql 2.12.1--2.12.0.sql - 2.12.2--2.12.1.sql) + 2.12.2--2.12.1.sql + 2.13.0--2.12.2.sql) set(MODULE_PATHNAME "$libdir/timescaledb-${PROJECT_VERSION_MOD}") set(LOADER_PATHNAME "$libdir/timescaledb") diff --git a/sql/updates/2.12.2--2.13.0.sql b/sql/updates/2.12.2--2.13.0.sql new file mode 100644 index 00000000000..a99b15de009 --- /dev/null +++ b/sql/updates/2.12.2--2.13.0.sql @@ -0,0 +1,444 @@ +CREATE TYPE _timescaledb_internal.dimension_info; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring) + RETURNS _timescaledb_internal.dimension_info + LANGUAGE C STRICT IMMUTABLE + AS '@MODULE_PATHNAME@', 'ts_dimension_info_in'; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info) + RETURNS cstring + LANGUAGE C STRICT IMMUTABLE + AS '@MODULE_PATHNAME@', 'ts_dimension_info_out'; + +CREATE TYPE _timescaledb_internal.dimension_info ( + INPUT = _timescaledb_functions.dimension_info_in, + OUTPUT = _timescaledb_functions.dimension_info_out, + INTERNALLENGTH = VARIABLE +); + +CREATE FUNCTION @extschema@.create_hypertable( + relation REGCLASS, + dimension _timescaledb_internal.dimension_info, + create_default_indexes BOOLEAN = TRUE, + if_not_exists BOOLEAN = FALSE, + migrate_data BOOLEAN = FALSE +) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.add_dimension( + hypertable REGCLASS, + dimension _timescaledb_internal.dimension_info, + if_not_exists BOOLEAN = FALSE +) RETURNS TABLE(dimension_id INT, created BOOL) +AS '@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.set_partitioning_interval( + hypertable REGCLASS, + partition_interval ANYELEMENT, + dimension_name NAME = NULL +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.by_hash(column_name NAME, number_partitions INTEGER, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info LANGUAGE C + AS '@MODULE_PATHNAME@', 'ts_hash_dimension'; + +CREATE FUNCTION @extschema@.by_range(column_name NAME, + partition_interval ANYELEMENT = NULL::bigint, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info LANGUAGE C + AS '@MODULE_PATHNAME@', 'ts_range_dimension'; + +-- +-- Rebuild the catalog table `_timescaledb_catalog.chunk` to +-- add new column `creation_time` +-- +CREATE TABLE _timescaledb_internal.chunk_tmp +AS SELECT * from _timescaledb_catalog.chunk; + +CREATE TABLE _timescaledb_internal.tmp_chunk_seq_value AS +SELECT last_value, is_called FROM _timescaledb_catalog.chunk_id_seq; + +--drop foreign keys on chunk table +ALTER TABLE _timescaledb_catalog.chunk_constraint DROP CONSTRAINT +chunk_constraint_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_index DROP CONSTRAINT +chunk_index_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_data_node DROP CONSTRAINT +chunk_data_node_chunk_id_fkey; +ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats DROP CONSTRAINT +bgw_policy_chunk_stats_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT +compression_chunk_size_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT +compression_chunk_size_compressed_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_copy_operation DROP CONSTRAINT 
+chunk_copy_operation_chunk_id_fkey; + +--drop dependent views +DROP VIEW IF EXISTS timescaledb_information.hypertables; +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size; +DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats; +DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_id_seq; +DROP TABLE _timescaledb_catalog.chunk; + +CREATE SEQUENCE _timescaledb_catalog.chunk_id_seq MINVALUE 1; + +-- now create table without self referential foreign key +CREATE TABLE _timescaledb_catalog.chunk ( + id integer NOT NULL DEFAULT nextval('_timescaledb_catalog.chunk_id_seq'), + hypertable_id int NOT NULL, + schema_name name NOT NULL, + table_name name NOT NULL, + compressed_chunk_id integer , + dropped boolean NOT NULL DEFAULT FALSE, + status integer NOT NULL DEFAULT 0, + osm_chunk boolean NOT NULL DEFAULT FALSE, + creation_time timestamptz, + -- table constraints + CONSTRAINT chunk_pkey PRIMARY KEY (id), + CONSTRAINT chunk_schema_name_table_name_key UNIQUE (schema_name, table_name) +); + +INSERT INTO _timescaledb_catalog.chunk +( id, hypertable_id, schema_name, table_name, + compressed_chunk_id, dropped, status, osm_chunk) +SELECT id, hypertable_id, schema_name, table_name, + compressed_chunk_id, dropped, status, osm_chunk +FROM _timescaledb_internal.chunk_tmp; + +-- update creation_time for chunks +UPDATE + _timescaledb_catalog.chunk c +SET + creation_time = (pg_catalog.pg_stat_file(pg_catalog.pg_relation_filepath(r.oid))).modification +FROM + pg_class r, pg_namespace n +WHERE + r.relnamespace = n.oid + AND r.relname = c.table_name + AND n.nspname = c.schema_name + AND r.relkind = 'r' + AND c.dropped IS FALSE; + +-- Make sure that there are no records with empty creation time +UPDATE _timescaledb_catalog.chunk SET creation_time = now() WHERE creation_time IS NULL; + +--add indexes to the chunk table +CREATE INDEX chunk_hypertable_id_idx ON _timescaledb_catalog.chunk (hypertable_id); +CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compressed_chunk_id); +CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id); +CREATE INDEX chunk_hypertable_id_creation_time_idx ON _timescaledb_catalog.chunk(hypertable_id, creation_time); + +ALTER SEQUENCE _timescaledb_catalog.chunk_id_seq OWNED BY _timescaledb_catalog.chunk.id; +SELECT setval('_timescaledb_catalog.chunk_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_chunk_seq_value; + +-- add self referential foreign key +ALTER TABLE _timescaledb_catalog.chunk ADD CONSTRAINT chunk_compressed_chunk_id_fkey FOREIGN KEY ( compressed_chunk_id ) + REFERENCES _timescaledb_catalog.chunk( id ); + +--add foreign key constraint +ALTER TABLE _timescaledb_catalog.chunk + ADD CONSTRAINT chunk_hypertable_id_fkey + FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', ''); +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_id_seq', ''); + +-- Add non-null constraint +ALTER TABLE _timescaledb_catalog.chunk + ALTER COLUMN creation_time SET NOT NULL; + +--add the foreign key constraints +ALTER TABLE _timescaledb_catalog.chunk_constraint ADD CONSTRAINT +chunk_constraint_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id);
+ALTER TABLE _timescaledb_catalog.chunk_index ADD CONSTRAINT +chunk_index_chunk_id_fkey FOREIGN KEY (chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk_data_node ADD CONSTRAINT +chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id); +ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats ADD CONSTRAINT +bgw_policy_chunk_stats_chunk_id_fkey FOREIGN KEY (chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT +compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT +compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk_copy_operation ADD CONSTRAINT +chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE; + +--cleanup +DROP TABLE _timescaledb_internal.chunk_tmp; +DROP TABLE _timescaledb_internal.tmp_chunk_seq_value; + +GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC; +-- end recreate _timescaledb_catalog.chunk table -- + +-- +-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to +-- add new column `numrows_frozen_immediately` +-- +CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp + AS SELECT * from _timescaledb_catalog.compression_chunk_size; + +-- Drop dependent views +-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update +-- (see above) + +-- Drop table +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size; +DROP TABLE _timescaledb_catalog.compression_chunk_size; + +CREATE TABLE _timescaledb_catalog.compression_chunk_size ( + chunk_id integer NOT NULL, + compressed_chunk_id integer NOT NULL, + uncompressed_heap_size bigint NOT NULL, + uncompressed_toast_size bigint NOT NULL, + uncompressed_index_size bigint NOT NULL, + compressed_heap_size bigint NOT NULL, + compressed_toast_size bigint NOT NULL, + compressed_index_size bigint NOT NULL, + numrows_pre_compression bigint, + numrows_post_compression bigint, + numrows_frozen_immediately bigint, + -- table constraints + CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), + CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.compression_chunk_size +(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression, numrows_frozen_immediately) +SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression, 0 +FROM _timescaledb_internal.compression_chunk_size_tmp; + +DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; + +SELECT
pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); + +GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC; + +-- End modify `_timescaledb_catalog.compression_chunk_size` + +DROP FUNCTION @extschema@.drop_chunks(REGCLASS, "any", "any", BOOL); +CREATE FUNCTION @extschema@.drop_chunks( + relation REGCLASS, + older_than "any" = NULL, + newer_than "any" = NULL, + verbose BOOLEAN = FALSE, + created_before "any" = NULL, + created_after "any" = NULL + ) RETURNS SETOF TEXT AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks' + LANGUAGE C VOLATILE PARALLEL UNSAFE; + +DROP FUNCTION @extschema@.show_chunks(REGCLASS, "any", "any"); +CREATE FUNCTION @extschema@.show_chunks( + relation REGCLASS, + older_than "any" = NULL, + newer_than "any" = NULL, + created_before "any" = NULL, + created_after "any" = NULL + ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks' + LANGUAGE C STABLE PARALLEL SAFE; + +DROP FUNCTION @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); +CREATE FUNCTION @extschema@.add_retention_policy( + relation REGCLASS, + drop_after "any" = NULL, + if_not_exists BOOL = false, + schedule_interval INTERVAL = NULL, + initial_start TIMESTAMPTZ = NULL, + timezone TEXT = NULL, + drop_created_before INTERVAL = NULL +) +RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add' +LANGUAGE C VOLATILE; + +DROP FUNCTION @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); +CREATE FUNCTION @extschema@.add_compression_policy( + hypertable REGCLASS, + compress_after "any" = NULL, + if_not_exists BOOL = false, + schedule_interval INTERVAL = NULL, + initial_start TIMESTAMPTZ = NULL, + timezone TEXT = NULL, + compress_created_before INTERVAL = NULL +) +RETURNS INTEGER +AS '@MODULE_PATHNAME@', 'ts_policy_compression_add' +LANGUAGE C VOLATILE; + +DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN); +DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN); +CREATE PROCEDURE +_timescaledb_functions.policy_compression_execute( + job_id INTEGER, + htid INTEGER, + lag ANYELEMENT, + maxchunks INTEGER, + verbose_log BOOLEAN, + recompress_enabled BOOLEAN, + use_creation_time BOOLEAN) +AS $$ +DECLARE + htoid REGCLASS; + chunk_rec RECORD; + numchunks INTEGER := 1; + _message text; + _detail text; + -- chunk status bits: + bit_compressed int := 1; + bit_compressed_unordered int := 2; + bit_frozen int := 4; + bit_compressed_partial int := 8; + creation_lag INTERVAL := NULL; +BEGIN + + -- procedures with SET clause cannot execute transaction + -- control so we adjust search_path in procedure body + SET LOCAL search_path TO pg_catalog, pg_temp; + + SELECT format('%I.%I', schema_name, table_name) INTO htoid + FROM _timescaledb_catalog.hypertable + WHERE id = htid; + + -- for the integer cases, we have to compute the lag w.r.t + -- the integer_now function and then pass on to show_chunks + IF pg_typeof(lag) IN ('BIGINT'::regtype, 'INTEGER'::regtype, 'SMALLINT'::regtype) THEN + -- cannot have use_creation_time set with this + IF use_creation_time IS TRUE THEN + RAISE EXCEPTION 'job % cannot use creation time with integer_now function', job_id; + END IF; + lag := _timescaledb_functions.subtract_integer_from_now(htoid, lag::BIGINT); + END IF; + + -- if use_creation_time has been specified then the lag needs to be used with the + -- 
"compress_created_before" argument. Otherwise the usual "older_than" argument + -- is good enough + IF use_creation_time IS TRUE THEN + creation_lag := lag; + lag := NULL; + END IF; + + FOR chunk_rec IN + SELECT + show.oid, ch.schema_name, ch.table_name, ch.status + FROM + @extschema@.show_chunks(htoid, older_than => lag, created_before => creation_lag) AS show(oid) + INNER JOIN pg_class pgc ON pgc.oid = show.oid + INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid + INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid + WHERE + ch.dropped IS FALSE + AND ( + ch.status = 0 OR + ( + ch.status & bit_compressed > 0 AND ( + ch.status & bit_compressed_unordered > 0 OR + ch.status & bit_compressed_partial > 0 + ) + ) + ) + LOOP + IF chunk_rec.status = 0 THEN + BEGIN + PERFORM @extschema@.compress_chunk( chunk_rec.oid ); + EXCEPTION WHEN OTHERS THEN + GET STACKED DIAGNOSTICS + _message = MESSAGE_TEXT, + _detail = PG_EXCEPTION_DETAIL; + RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text + USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), + ERRCODE = sqlstate; + END; + ELSIF + ( + chunk_rec.status & bit_compressed > 0 AND ( + chunk_rec.status & bit_compressed_unordered > 0 OR + chunk_rec.status & bit_compressed_partial > 0 + ) + ) AND recompress_enabled IS TRUE THEN + BEGIN + PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true); + EXCEPTION WHEN OTHERS THEN + RAISE WARNING 'decompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text + USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), + ERRCODE = sqlstate; + END; + -- SET LOCAL is only active until end of transaction. + -- While we could use SET at the start of the function we do not + -- want to bleed out search_path to caller, so we do SET LOCAL + -- again after COMMIT + BEGIN + PERFORM @extschema@.compress_chunk(chunk_rec.oid); + EXCEPTION WHEN OTHERS THEN + RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text + USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), + ERRCODE = sqlstate; + END; + END IF; + COMMIT; + -- SET LOCAL is only active until end of transaction. 
+ -- While we could use SET at the start of the function we do not + -- want to bleed out search_path to caller, so we do SET LOCAL + -- again after COMMIT + SET LOCAL search_path TO pg_catalog, pg_temp; + IF verbose_log THEN + RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name; + END IF; + numchunks := numchunks + 1; + IF maxchunks > 0 AND numchunks >= maxchunks THEN + EXIT; + END IF; + END LOOP; +END; +$$ LANGUAGE PLPGSQL; + +-- fix atttypmod and attcollation for segmentby columns +DO $$ +DECLARE + htc_id INTEGER; + htc REGCLASS; + _attname NAME; + _atttypmod INTEGER; + _attcollation OID; +BEGIN + -- find any segmentby columns where typmod and collation in + -- the compressed hypertable does not match the uncompressed + -- hypertable values + FOR htc_id, htc, _attname, _atttypmod, _attcollation IN + SELECT cat.htc_id, cat.htc, pga.attname, ht_mod, ht_coll + FROM pg_attribute pga + INNER JOIN + ( + SELECT + htc.id AS htc_id, + format('%I.%I',htc.schema_name,htc.table_name) AS htc, + att_ht.atttypmod AS ht_mod, + att_ht.attcollation AS ht_coll, + c.attname + FROM _timescaledb_catalog.hypertable_compression c + INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id=c.hypertable_id + INNER JOIN pg_attribute att_ht ON att_ht.attname = c.attname AND att_ht.attrelid = format('%I.%I',ht.schema_name,ht.table_name)::regclass + INNER JOIN _timescaledb_catalog.hypertable htc ON htc.id=ht.compressed_hypertable_id + WHERE c.segmentby_column_index > 0 + ) cat ON cat.htc::regclass = pga.attrelid AND cat.attname = pga.attname + WHERE pga.atttypmod <> ht_mod OR pga.attcollation <> ht_coll + LOOP + -- fix typmod and collation for the compressed hypertable and all compressed chunks + UPDATE pg_attribute SET atttypmod = _atttypmod, attcollation = _attcollation WHERE attname = _attname AND attrelid IN ( + SELECT format('%I.%I',schema_name,table_name)::regclass from _timescaledb_catalog.chunk WHERE hypertable_id = htc_id AND NOT dropped UNION ALL SELECT htc + ); + END LOOP; +END +$$; diff --git a/sql/updates/2.13.0--2.12.2.sql b/sql/updates/2.13.0--2.12.2.sql new file mode 100644 index 00000000000..123d39f252c --- /dev/null +++ b/sql/updates/2.13.0--2.12.2.sql @@ -0,0 +1,404 @@ +-- API changes related to hypertable generalization +DROP FUNCTION IF EXISTS @extschema@.add_dimension(regclass,dimension_info,boolean); +DROP FUNCTION IF EXISTS @extschema@.create_hypertable(regclass,dimension_info,boolean,boolean,boolean); +DROP FUNCTION IF EXISTS @extschema@.set_partitioning_interval(regclass,anyelement,name); +DROP FUNCTION IF EXISTS @extschema@.by_hash(name,integer,regproc); +DROP FUNCTION IF EXISTS @extschema@.by_range(name,anyelement,regproc); + +DROP TYPE IF EXISTS _timescaledb_internal.dimension_info CASCADE; + +-- +-- Rebuild the catalog table `_timescaledb_catalog.chunk` +-- +-- We need to recreate the catalog from scratch because when we drop a column +-- Postgres marks `pg_attribute.attisdropped=TRUE` instead of removing it from +-- the `pg_catalog.pg_attribute` table. +-- +-- If we downgrade and upgrade the extension without rebuilding the catalog table it +-- will mess up `pg_attribute.attnum` and we will end up with issues when trying +-- to update data in those catalog tables. 
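-- As an illustration of the `attisdropped` behavior described above, consider a
-- hypothetical scratch table (not part of this update script):
--
--   CREATE TABLE t (a int, b int);
--   ALTER TABLE t DROP COLUMN b;
--   SELECT attname, attnum, attisdropped
--     FROM pg_catalog.pg_attribute
--    WHERE attrelid = 't'::regclass AND attnum > 0;
--
--    attname                      | attnum | attisdropped
--   ------------------------------+--------+--------------
--    a                            |      1 | f
--    ........pg.dropped.2........ |      2 | t
--
-- The dropped column keeps occupying attnum 2, so a column added later would get
-- attnum 3 instead; rebuilding the table is what restores consecutive attnums.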
+ +-- Recreate _timescaledb_catalog.chunk table -- +CREATE TABLE _timescaledb_internal.chunk_tmp +AS SELECT * from _timescaledb_catalog.chunk; + +CREATE TABLE _timescaledb_internal.tmp_chunk_seq_value AS +SELECT last_value, is_called FROM _timescaledb_catalog.chunk_id_seq; + +--drop foreign keys on chunk table +ALTER TABLE _timescaledb_catalog.chunk_constraint DROP CONSTRAINT +chunk_constraint_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_index DROP CONSTRAINT +chunk_index_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_data_node DROP CONSTRAINT +chunk_data_node_chunk_id_fkey; +ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats DROP CONSTRAINT +bgw_policy_chunk_stats_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT +compression_chunk_size_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT +compression_chunk_size_compressed_chunk_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_copy_operation DROP CONSTRAINT +chunk_copy_operation_chunk_id_fkey; + +--drop dependent views +DROP VIEW IF EXISTS timescaledb_information.hypertables; +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size; +DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats; +DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_id_seq; +DROP TABLE _timescaledb_catalog.chunk; + +CREATE SEQUENCE _timescaledb_catalog.chunk_id_seq MINVALUE 1; + +-- now create table without self referential foreign key +CREATE TABLE _timescaledb_catalog.chunk ( + id integer NOT NULL DEFAULT nextval('_timescaledb_catalog.chunk_id_seq'), + hypertable_id int NOT NULL, + schema_name name NOT NULL, + table_name name NOT NULL, + compressed_chunk_id integer , + dropped boolean NOT NULL DEFAULT FALSE, + status integer NOT NULL DEFAULT 0, + osm_chunk boolean NOT NULL DEFAULT FALSE, + -- table constraints + CONSTRAINT chunk_pkey PRIMARY KEY (id), + CONSTRAINT chunk_schema_name_table_name_key UNIQUE (schema_name, table_name) +); + +INSERT INTO _timescaledb_catalog.chunk +( id, hypertable_id, schema_name, table_name, + compressed_chunk_id, dropped, status, osm_chunk) +SELECT id, hypertable_id, schema_name, table_name, + compressed_chunk_id, dropped, status, osm_chunk +FROM _timescaledb_internal.chunk_tmp; + +--add indexes to the chunk table +CREATE INDEX chunk_hypertable_id_idx ON _timescaledb_catalog.chunk (hypertable_id); +CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compressed_chunk_id); +CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id); + +ALTER SEQUENCE _timescaledb_catalog.chunk_id_seq OWNED BY _timescaledb_catalog.chunk.id; +SELECT setval('_timescaledb_catalog.chunk_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_chunk_seq_value; + +-- add self referential foreign key +ALTER TABLE _timescaledb_catalog.chunk ADD CONSTRAINT chunk_compressed_chunk_id_fkey FOREIGN KEY ( compressed_chunk_id ) + REFERENCES _timescaledb_catalog.chunk( id ); + +--add foreign key constraint +ALTER TABLE _timescaledb_catalog.chunk + ADD CONSTRAINT chunk_hypertable_id_fkey + FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', ''); +SELECT 
pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_id_seq', ''); + +--add the foreign key constraints +ALTER TABLE _timescaledb_catalog.chunk_constraint ADD CONSTRAINT +chunk_constraint_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id); +ALTER TABLE _timescaledb_catalog.chunk_index ADD CONSTRAINT +chunk_index_chunk_id_fkey FOREIGN KEY (chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk_data_node ADD CONSTRAINT +chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id); +ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats ADD CONSTRAINT +bgw_policy_chunk_stats_chunk_id_fkey FOREIGN KEY (chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT +compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT +compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) +REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk_copy_operation ADD CONSTRAINT +chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE; + +--cleanup +DROP TABLE _timescaledb_internal.chunk_tmp; +DROP TABLE _timescaledb_internal.tmp_chunk_seq_value; + +GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC; + +-- end recreate _timescaledb_catalog.chunk table -- + + +-- +-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to +-- remove column `numrows_frozen_immediately` +-- +CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp + AS SELECT * from _timescaledb_catalog.compression_chunk_size; + +-- Drop dependent views +-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update +-- (see above) + +-- Drop table +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size; +DROP TABLE _timescaledb_catalog.compression_chunk_size; + +CREATE TABLE _timescaledb_catalog.compression_chunk_size ( + chunk_id integer NOT NULL, + compressed_chunk_id integer NOT NULL, + uncompressed_heap_size bigint NOT NULL, + uncompressed_toast_size bigint NOT NULL, + uncompressed_index_size bigint NOT NULL, + compressed_heap_size bigint NOT NULL, + compressed_toast_size bigint NOT NULL, + compressed_index_size bigint NOT NULL, + numrows_pre_compression bigint, + numrows_post_compression bigint, + -- table constraints + CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), + CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.compression_chunk_size +(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression) +SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, +
compressed_index_size, numrows_pre_compression, numrows_post_compression +FROM _timescaledb_internal.compression_chunk_size_tmp; + +DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); + +GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC; + +-- End modify `_timescaledb_catalog.compression_chunk_size` + +DROP FUNCTION @extschema@.drop_chunks(REGCLASS, "any", "any", BOOL, "any", "any"); +CREATE FUNCTION @extschema@.drop_chunks( + relation REGCLASS, + older_than "any" = NULL, + newer_than "any" = NULL, + verbose BOOLEAN = FALSE + ) RETURNS SETOF TEXT AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks' + LANGUAGE C VOLATILE PARALLEL UNSAFE; + +DROP FUNCTION @extschema@.show_chunks(REGCLASS, "any", "any", "any", "any"); +CREATE FUNCTION @extschema@.show_chunks( + relation REGCLASS, + older_than "any" = NULL, + newer_than "any" = NULL + ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks' + LANGUAGE C STABLE PARALLEL SAFE; + +DROP PROCEDURE IF EXISTS _timescaledb_functions.repair_relation_acls(); +DROP FUNCTION IF EXISTS _timescaledb_functions.makeaclitem(regrole, regrole, text, bool); + +DROP FUNCTION IF EXISTS _timescaledb_functions.cagg_validate_query(TEXT); + +DROP FUNCTION @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT, INTERVAL); +CREATE FUNCTION @extschema@.add_retention_policy( + relation REGCLASS, + drop_after "any", + if_not_exists BOOL = false, + schedule_interval INTERVAL = NULL, + initial_start TIMESTAMPTZ = NULL, + timezone TEXT = NULL +) +RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add' +LANGUAGE C VOLATILE; + +DROP FUNCTION @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT, INTERVAL); +CREATE FUNCTION @extschema@.add_compression_policy( + hypertable REGCLASS, + compress_after "any", + if_not_exists BOOL = false, + schedule_interval INTERVAL = NULL, + initial_start TIMESTAMPTZ = NULL, + timezone TEXT = NULL +) +RETURNS INTEGER +AS '@MODULE_PATHNAME@', 'ts_policy_compression_add' +LANGUAGE C VOLATILE; + +DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN, BOOLEAN); +DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN, BOOLEAN); +CREATE PROCEDURE +_timescaledb_functions.policy_compression_execute( + job_id INTEGER, + htid INTEGER, + lag ANYELEMENT, + maxchunks INTEGER, + verbose_log BOOLEAN, + recompress_enabled BOOLEAN) +AS $$ +DECLARE + htoid REGCLASS; + chunk_rec RECORD; + numchunks INTEGER := 1; + _message text; + _detail text; + -- chunk status bits: + bit_compressed int := 1; + bit_compressed_unordered int := 2; + bit_frozen int := 4; + bit_compressed_partial int := 8; +BEGIN + + -- procedures with SET clause cannot execute transaction + -- control so we adjust search_path in procedure body + SET LOCAL search_path TO pg_catalog, pg_temp; + + SELECT format('%I.%I', schema_name, table_name) INTO htoid + FROM _timescaledb_catalog.hypertable + WHERE id = htid; + + -- for the integer cases, we have to compute the lag w.r.t + -- the integer_now function and then pass on to show_chunks + IF pg_typeof(lag) IN ('BIGINT'::regtype, 'INTEGER'::regtype, 'SMALLINT'::regtype) THEN + lag := _timescaledb_functions.subtract_integer_from_now(htoid, lag::BIGINT); + END IF; + + FOR chunk_rec IN + SELECT + show.oid, 
ch.schema_name, ch.table_name, ch.status + FROM + @extschema@.show_chunks(htoid, older_than => lag) AS show(oid) + INNER JOIN pg_class pgc ON pgc.oid = show.oid + INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid + INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid + WHERE + ch.dropped IS FALSE + AND ( + ch.status = 0 OR + ( + ch.status & bit_compressed > 0 AND ( + ch.status & bit_compressed_unordered > 0 OR + ch.status & bit_compressed_partial > 0 + ) + ) + ) + LOOP + IF chunk_rec.status = 0 THEN + BEGIN + PERFORM @extschema@.compress_chunk( chunk_rec.oid ); + EXCEPTION WHEN OTHERS THEN + GET STACKED DIAGNOSTICS + _message = MESSAGE_TEXT, + _detail = PG_EXCEPTION_DETAIL; + RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text + USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), + ERRCODE = sqlstate; + END; + ELSIF + ( + chunk_rec.status & bit_compressed > 0 AND ( + chunk_rec.status & bit_compressed_unordered > 0 OR + chunk_rec.status & bit_compressed_partial > 0 + ) + ) AND recompress_enabled IS TRUE THEN + BEGIN + PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true); + EXCEPTION WHEN OTHERS THEN + RAISE WARNING 'decompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text + USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), + ERRCODE = sqlstate; + END; + -- SET LOCAL is only active until end of transaction. + -- While we could use SET at the start of the function we do not + -- want to bleed out search_path to caller, so we do SET LOCAL + -- again after COMMIT + BEGIN + PERFORM @extschema@.compress_chunk(chunk_rec.oid); + EXCEPTION WHEN OTHERS THEN + RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text + USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), + ERRCODE = sqlstate; + END; + END IF; + COMMIT; + -- SET LOCAL is only active until end of transaction. 
+ -- While we could use SET at the start of the function we do not + -- want to bleed out search_path to caller, so we do SET LOCAL + -- again after COMMIT + SET LOCAL search_path TO pg_catalog, pg_temp; + IF verbose_log THEN + RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name; + END IF; + numchunks := numchunks + 1; + IF maxchunks > 0 AND numchunks >= maxchunks THEN + EXIT; + END IF; + END LOOP; +END; +$$ LANGUAGE PLPGSQL; + +DROP FUNCTION _timescaledb_functions.chunk_constraint_add_table_constraint( + chunk_constraint_row _timescaledb_catalog.chunk_constraint +); + +CREATE FUNCTION _timescaledb_functions.chunk_constraint_add_table_constraint( + chunk_constraint_row _timescaledb_catalog.chunk_constraint +) + RETURNS VOID LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + chunk_row _timescaledb_catalog.chunk; + hypertable_row _timescaledb_catalog.hypertable; + constraint_oid OID; + constraint_type CHAR; + check_sql TEXT; + def TEXT; + indx_tablespace NAME; + tablespace_def TEXT; +BEGIN + SELECT * INTO STRICT chunk_row FROM _timescaledb_catalog.chunk c WHERE c.id = chunk_constraint_row.chunk_id; + SELECT * INTO STRICT hypertable_row FROM _timescaledb_catalog.hypertable h WHERE h.id = chunk_row.hypertable_id; + + IF chunk_constraint_row.dimension_slice_id IS NOT NULL THEN + RAISE 'cannot create dimension constraint %', chunk_constraint_row; + ELSIF chunk_constraint_row.hypertable_constraint_name IS NOT NULL THEN + + SELECT oid, contype INTO STRICT constraint_oid, constraint_type FROM pg_constraint + WHERE conname=chunk_constraint_row.hypertable_constraint_name AND + conrelid = format('%I.%I', hypertable_row.schema_name, hypertable_row.table_name)::regclass::oid; + + IF constraint_type IN ('p','u') THEN + -- since primary keys and unique constraints are backed by an index + -- they might have an index tablespace assigned + -- the tablespace is not part of the constraint definition so + -- we have to append it explicitly to preserve it + SELECT T.spcname INTO indx_tablespace + FROM pg_constraint C, pg_class I, pg_tablespace T + WHERE C.oid = constraint_oid AND C.contype IN ('p', 'u') AND I.oid = C.conindid AND I.reltablespace = T.oid; + + def := pg_get_constraintdef(constraint_oid); + + IF indx_tablespace IS NOT NULL THEN + def := format('%s USING INDEX TABLESPACE %I', def, indx_tablespace); + END IF; + + ELSIF constraint_type = 't' THEN + -- constraint triggers are copied separately with normal triggers + def := NULL; + ELSE + def := pg_get_constraintdef(constraint_oid); + END IF; + + ELSE + RAISE 'unknown constraint type'; + END IF; + + IF def IS NOT NULL THEN + -- to allow for custom types with operators outside of pg_catalog + -- we set search_path to @extschema@ + SET LOCAL search_path TO @extschema@, pg_temp; + EXECUTE pg_catalog.format( + $$ ALTER TABLE %I.%I ADD CONSTRAINT %I %s $$, + chunk_row.schema_name, chunk_row.table_name, chunk_constraint_row.constraint_name, def + ); + + END IF; +END +$BODY$ SET search_path TO pg_catalog, pg_temp; diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index a99b15de009..e69de29bb2d 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -1,444 +0,0 @@ -CREATE TYPE _timescaledb_internal.dimension_info; - -CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring) - RETURNS _timescaledb_internal.dimension_info - LANGUAGE C STRICT IMMUTABLE - AS '@MODULE_PATHNAME@', 'ts_dimension_info_in'; - -CREATE OR REPLACE FUNCTION
_timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info) - RETURNS cstring - LANGUAGE C STRICT IMMUTABLE - AS '@MODULE_PATHNAME@', 'ts_dimension_info_out'; - -CREATE TYPE _timescaledb_internal.dimension_info ( - INPUT = _timescaledb_functions.dimension_info_in, - OUTPUT = _timescaledb_functions.dimension_info_out, - INTERNALLENGTH = VARIABLE -); - -CREATE FUNCTION @extschema@.create_hypertable( - relation REGCLASS, - dimension _timescaledb_internal.dimension_info, - create_default_indexes BOOLEAN = TRUE, - if_not_exists BOOLEAN = FALSE, - migrate_data BOOLEAN = FALSE -) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.add_dimension( - hypertable REGCLASS, - dimension _timescaledb_internal.dimension_info, - if_not_exists BOOLEAN = FALSE -) RETURNS TABLE(dimension_id INT, created BOOL) -AS '@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.set_partitioning_interval( - hypertable REGCLASS, - partition_interval ANYELEMENT, - dimension_name NAME = NULL -) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.by_hash(column_name NAME, number_partitions INTEGER, - partition_func regproc = NULL) - RETURNS _timescaledb_internal.dimension_info LANGUAGE C - AS '@MODULE_PATHNAME@', 'ts_hash_dimension'; - -CREATE FUNCTION @extschema@.by_range(column_name NAME, - partition_interval ANYELEMENT = NULL::bigint, - partition_func regproc = NULL) - RETURNS _timescaledb_internal.dimension_info LANGUAGE C - AS '@MODULE_PATHNAME@', 'ts_range_dimension'; - --- --- Rebuild the catalog table `_timescaledb_catalog.chunk` to --- add new column `creation_time` --- -CREATE TABLE _timescaledb_internal.chunk_tmp -AS SELECT * from _timescaledb_catalog.chunk; - -CREATE TABLE _timescaledb_internal.tmp_chunk_seq_value AS -SELECT last_value, is_called FROM _timescaledb_catalog.chunk_id_seq; - ---drop foreign keys on chunk table -ALTER TABLE _timescaledb_catalog.chunk_constraint DROP CONSTRAINT -chunk_constraint_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk_index DROP CONSTRAINT -chunk_index_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk_data_node DROP CONSTRAINT -chunk_data_node_chunk_id_fkey; -ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats DROP CONSTRAINT -bgw_policy_chunk_stats_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT -compression_chunk_size_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT -compression_chunk_size_compressed_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk_copy_operation DROP CONSTRAINT -chunk_copy_operation_chunk_id_fkey; - ---drop dependent views -DROP VIEW IF EXISTS timescaledb_information.hypertables; -DROP VIEW IF EXISTS timescaledb_information.chunks; -DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size; -DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats; -DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; - -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk; -ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_id_seq; -DROP TABLE _timescaledb_catalog.chunk; - -CREATE SEQUENCE _timescaledb_catalog.chunk_id_seq MINVALUE 1; - --- now create table without self referential foreign key -CREATE TABLE _timescaledb_catalog.chunk ( - id integer NOT NULL DEFAULT 
nextval('_timescaledb_catalog.chunk_id_seq'), - hypertable_id int NOT NULL, - schema_name name NOT NULL, - table_name name NOT NULL, - compressed_chunk_id integer , - dropped boolean NOT NULL DEFAULT FALSE, - status integer NOT NULL DEFAULT 0, - osm_chunk boolean NOT NULL DEFAULT FALSE, - creation_time timestamptz, - -- table constraints - CONSTRAINT chunk_pkey PRIMARY KEY (id), - CONSTRAINT chunk_schema_name_table_name_key UNIQUE (schema_name, table_name) -); - -INSERT INTO _timescaledb_catalog.chunk -( id, hypertable_id, schema_name, table_name, - compressed_chunk_id, dropped, status, osm_chunk) -SELECT id, hypertable_id, schema_name, table_name, - compressed_chunk_id, dropped, status, osm_chunk -FROM _timescaledb_internal.chunk_tmp; - --- update creation_time for chunks -UPDATE - _timescaledb_catalog.chunk c -SET - creation_time = (pg_catalog.pg_stat_file(pg_catalog.pg_relation_filepath(r.oid))).modification -FROM - pg_class r, pg_namespace n -WHERE - r.relnamespace = n.oid - AND r.relname = c.table_name - AND n.nspname = c.schema_name - AND r.relkind = 'r' - AND c.dropped IS FALSE; - --- Make sure that there are no record with empty creation time -UPDATE _timescaledb_catalog.chunk SET creation_time = now() WHERE creation_time IS NULL; - ---add indexes to the chunk table -CREATE INDEX chunk_hypertable_id_idx ON _timescaledb_catalog.chunk (hypertable_id); -CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compressed_chunk_id); -CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id); -CREATE INDEX chunk_hypertable_id_creation_time_idx ON _timescaledb_catalog.chunk(hypertable_id, creation_time); - -ALTER SEQUENCE _timescaledb_catalog.chunk_id_seq OWNED BY _timescaledb_catalog.chunk.id; -SELECT setval('_timescaledb_catalog.chunk_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_chunk_seq_value; - --- add self referential foreign key -ALTER TABLE _timescaledb_catalog.chunk ADD CONSTRAINT chunk_compressed_chunk_id_fkey FOREIGN KEY ( compressed_chunk_id ) - REFERENCES _timescaledb_catalog.chunk( id ); - ---add foreign key constraint -ALTER TABLE _timescaledb_catalog.chunk - ADD CONSTRAINT chunk_hypertable_id_fkey - FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); - -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', ''); -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_id_seq', ''); - --- Add non-null constraint -ALTER TABLE _timescaledb_catalog.chunk - ALTER COLUMN creation_time SET NOT NULL; - ---add the foreign key constraints -ALTER TABLE _timescaledb_catalog.chunk_constraint ADD CONSTRAINT -chunk_constraint_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id); -ALTER TABLE _timescaledb_catalog.chunk_index ADD CONSTRAINT -chunk_index_chunk_id_fkey FOREIGN KEY (chunk_id) -REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.chunk_data_node ADD CONSTRAINT -chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id); -ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats ADD CONSTRAINT -bgw_policy_chunk_stats_chunk_id_fkey FOREIGN KEY (chunk_id) -REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT -compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) -REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; -ALTER TABLE 
_timescaledb_catalog.compression_chunk_size ADD CONSTRAINT -compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) -REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.chunk_copy_operation ADD CONSTRAINT -chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE; - ---cleanup -DROP TABLE _timescaledb_internal.chunk_tmp; -DROP TABLE _timescaledb_internal.tmp_chunk_seq_value; - -GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC; -GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC; --- end recreate _timescaledb_catalog.chunk table -- - --- --- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to --- add new column `numrows_frozen_immediately` --- -CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp - AS SELECT * from _timescaledb_catalog.compression_chunk_size; - --- Drop depended views --- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update --- (see above) - --- Drop table -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size; -DROP TABLE _timescaledb_catalog.compression_chunk_size; - -CREATE TABLE _timescaledb_catalog.compression_chunk_size ( - chunk_id integer NOT NULL, - compressed_chunk_id integer NOT NULL, - uncompressed_heap_size bigint NOT NULL, - uncompressed_toast_size bigint NOT NULL, - uncompressed_index_size bigint NOT NULL, - compressed_heap_size bigint NOT NULL, - compressed_toast_size bigint NOT NULL, - compressed_index_size bigint NOT NULL, - numrows_pre_compression bigint, - numrows_post_compression bigint, - numrows_frozen_immediately bigint, - -- table constraints - CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), - CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, - CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE -); - -INSERT INTO _timescaledb_catalog.compression_chunk_size -(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, - uncompressed_index_size, compressed_heap_size, compressed_toast_size, - compressed_index_size, numrows_pre_compression, numrows_post_compression, numrows_frozen_immediately) -SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, - uncompressed_index_size, compressed_heap_size, compressed_toast_size, - compressed_index_size, numrows_pre_compression, numrows_post_compression, 0 -FROM _timescaledb_internal.compression_chunk_size_tmp; - -DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; - -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); - -GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC; - --- End modify `_timescaledb_catalog.compression_chunk_size` - -DROP FUNCTION @extschema@.drop_chunks(REGCLASS, "any", "any", BOOL); -CREATE FUNCTION @extschema@.drop_chunks( - relation REGCLASS, - older_than "any" = NULL, - newer_than "any" = NULL, - verbose BOOLEAN = FALSE, - created_before "any" = NULL, - created_after "any" = NULL - ) RETURNS SETOF TEXT AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks' - LANGUAGE C VOLATILE PARALLEL UNSAFE; - -DROP FUNCTION @extschema@.show_chunks(REGCLASS, "any", "any"); -CREATE FUNCTION @extschema@.show_chunks( - relation REGCLASS, - older_than 
"any" = NULL, - newer_than "any" = NULL, - created_before "any" = NULL, - created_after "any" = NULL - ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks' - LANGUAGE C STABLE PARALLEL SAFE; - -DROP FUNCTION @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); -CREATE FUNCTION @extschema@.add_retention_policy( - relation REGCLASS, - drop_after "any" = NULL, - if_not_exists BOOL = false, - schedule_interval INTERVAL = NULL, - initial_start TIMESTAMPTZ = NULL, - timezone TEXT = NULL, - drop_created_before INTERVAL = NULL -) -RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add' -LANGUAGE C VOLATILE; - -DROP FUNCTION @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); -CREATE FUNCTION @extschema@.add_compression_policy( - hypertable REGCLASS, - compress_after "any" = NULL, - if_not_exists BOOL = false, - schedule_interval INTERVAL = NULL, - initial_start TIMESTAMPTZ = NULL, - timezone TEXT = NULL, - compress_created_before INTERVAL = NULL -) -RETURNS INTEGER -AS '@MODULE_PATHNAME@', 'ts_policy_compression_add' -LANGUAGE C VOLATILE; - -DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN); -DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN); -CREATE PROCEDURE -_timescaledb_functions.policy_compression_execute( - job_id INTEGER, - htid INTEGER, - lag ANYELEMENT, - maxchunks INTEGER, - verbose_log BOOLEAN, - recompress_enabled BOOLEAN, - use_creation_time BOOLEAN) -AS $$ -DECLARE - htoid REGCLASS; - chunk_rec RECORD; - numchunks INTEGER := 1; - _message text; - _detail text; - -- chunk status bits: - bit_compressed int := 1; - bit_compressed_unordered int := 2; - bit_frozen int := 4; - bit_compressed_partial int := 8; - creation_lag INTERVAL := NULL; -BEGIN - - -- procedures with SET clause cannot execute transaction - -- control so we adjust search_path in procedure body - SET LOCAL search_path TO pg_catalog, pg_temp; - - SELECT format('%I.%I', schema_name, table_name) INTO htoid - FROM _timescaledb_catalog.hypertable - WHERE id = htid; - - -- for the integer cases, we have to compute the lag w.r.t - -- the integer_now function and then pass on to show_chunks - IF pg_typeof(lag) IN ('BIGINT'::regtype, 'INTEGER'::regtype, 'SMALLINT'::regtype) THEN - -- cannot have use_creation_time set with this - IF use_creation_time IS TRUE THEN - RAISE EXCEPTION 'job % cannot use creation time with integer_now function', job_id; - END IF; - lag := _timescaledb_functions.subtract_integer_from_now(htoid, lag::BIGINT); - END IF; - - -- if use_creation_time has been specified then the lag needs to be used with the - -- "compress_created_before" argument. 
Otherwise the usual "older_than" argument - -- is good enough - IF use_creation_time IS TRUE THEN - creation_lag := lag; - lag := NULL; - END IF; - - FOR chunk_rec IN - SELECT - show.oid, ch.schema_name, ch.table_name, ch.status - FROM - @extschema@.show_chunks(htoid, older_than => lag, created_before => creation_lag) AS show(oid) - INNER JOIN pg_class pgc ON pgc.oid = show.oid - INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid - INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid - WHERE - ch.dropped IS FALSE - AND ( - ch.status = 0 OR - ( - ch.status & bit_compressed > 0 AND ( - ch.status & bit_compressed_unordered > 0 OR - ch.status & bit_compressed_partial > 0 - ) - ) - ) - LOOP - IF chunk_rec.status = 0 THEN - BEGIN - PERFORM @extschema@.compress_chunk( chunk_rec.oid ); - EXCEPTION WHEN OTHERS THEN - GET STACKED DIAGNOSTICS - _message = MESSAGE_TEXT, - _detail = PG_EXCEPTION_DETAIL; - RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text - USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), - ERRCODE = sqlstate; - END; - ELSIF - ( - chunk_rec.status & bit_compressed > 0 AND ( - chunk_rec.status & bit_compressed_unordered > 0 OR - chunk_rec.status & bit_compressed_partial > 0 - ) - ) AND recompress_enabled IS TRUE THEN - BEGIN - PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true); - EXCEPTION WHEN OTHERS THEN - RAISE WARNING 'decompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text - USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), - ERRCODE = sqlstate; - END; - -- SET LOCAL is only active until end of transaction. - -- While we could use SET at the start of the function we do not - -- want to bleed out search_path to caller, so we do SET LOCAL - -- again after COMMIT - BEGIN - PERFORM @extschema@.compress_chunk(chunk_rec.oid); - EXCEPTION WHEN OTHERS THEN - RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text - USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), - ERRCODE = sqlstate; - END; - END IF; - COMMIT; - -- SET LOCAL is only active until end of transaction. 
- -- While we could use SET at the start of the function we do not - -- want to bleed out search_path to caller, so we do SET LOCAL - -- again after COMMIT - SET LOCAL search_path TO pg_catalog, pg_temp; - IF verbose_log THEN - RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name; - END IF; - numchunks := numchunks + 1; - IF maxchunks > 0 AND numchunks >= maxchunks THEN - EXIT; - END IF; - END LOOP; -END; -$$ LANGUAGE PLPGSQL; - --- fix atttypmod and attcollation for segmentby columns -DO $$ -DECLARE - htc_id INTEGER; - htc REGCLASS; - _attname NAME; - _atttypmod INTEGER; - _attcollation OID; -BEGIN - -- find any segmentby columns where typmod and collation in - -- the compressed hypertable does not match the uncompressed - -- hypertable values - FOR htc_id, htc, _attname, _atttypmod, _attcollation IN - SELECT cat.htc_id, cat.htc, pga.attname, ht_mod, ht_coll - FROM pg_attribute pga - INNER JOIN - ( - SELECT - htc.id AS htc_id, - format('%I.%I',htc.schema_name,htc.table_name) AS htc, - att_ht.atttypmod AS ht_mod, - att_ht.attcollation AS ht_coll, - c.attname - FROM _timescaledb_catalog.hypertable_compression c - INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id=c.hypertable_id - INNER JOIN pg_attribute att_ht ON att_ht.attname = c.attname AND att_ht.attrelid = format('%I.%I',ht.schema_name,ht.table_name)::regclass - INNER JOIN _timescaledb_catalog.hypertable htc ON htc.id=ht.compressed_hypertable_id - WHERE c.segmentby_column_index > 0 - ) cat ON cat.htc::regclass = pga.attrelid AND cat.attname = pga.attname - WHERE pga.atttypmod <> ht_mod OR pga.attcollation <> ht_coll - LOOP - -- fix typmod and collation for the compressed hypertable and all compressed chunks - UPDATE pg_attribute SET atttypmod = _atttypmod, attcollation = _attcollation WHERE attname = _attname AND attrelid IN ( - SELECT format('%I.%I',schema_name,table_name)::regclass from _timescaledb_catalog.chunk WHERE hypertable_id = htc_id AND NOT dropped UNION ALL SELECT htc - ); - END LOOP; -END -$$; diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 123d39f252c..8b137891791 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -1,404 +1 @@ --- API changes related to hypertable generalization -DROP FUNCTION IF EXISTS @extschema@.add_dimension(regclass,dimension_info,boolean); -DROP FUNCTION IF EXISTS @extschema@.create_hypertable(regclass,dimension_info,boolean,boolean,boolean); -DROP FUNCTION IF EXISTS @extschema@.set_partitioning_interval(regclass,anyelement,name); -DROP FUNCTION IF EXISTS @extschema@.by_hash(name,integer,regproc); -DROP FUNCTION IF EXISTS @extschema@.by_range(name,anyelement,regproc); -DROP TYPE IF EXISTS _timescaledb_internal.dimension_info CASCADE; - --- --- Rebuild the catalog table `_timescaledb_catalog.chunk` --- --- We need to recreate the catalog from scratch because when we drop a column --- Postgres marks `pg_attribute.attisdropped=TRUE` instead of removing it from --- the `pg_catalog.pg_attribute` table. --- --- If we downgrade and upgrade the extension without rebuilding the catalog table it --- will mess up `pg_attribute.attnum` and we will end up with issues when trying --- to update data in those catalog tables. 
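-- A minimal sketch of the attisdropped behavior described above (the table
-- name attnum_demo is hypothetical; run it against a scratch database, it is
-- not part of this changeset):
--
--   CREATE TABLE attnum_demo (a int, b int);
--   ALTER TABLE attnum_demo DROP COLUMN a;
--   SELECT attname, attnum, attisdropped
--     FROM pg_attribute
--     WHERE attrelid = 'attnum_demo'::regclass AND attnum > 0;
--
-- The dropped column is still listed (as '........pg.dropped.1........')
-- with attisdropped = TRUE, and its attnum is never reused. This is why the
-- statements below copy the catalog data out, drop the table, and recreate
-- it from scratch instead of altering it in place.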
- --- Recreate _timescaledb_catalog.chunk table -- -CREATE TABLE _timescaledb_internal.chunk_tmp -AS SELECT * from _timescaledb_catalog.chunk; - -CREATE TABLE _timescaledb_internal.tmp_chunk_seq_value AS -SELECT last_value, is_called FROM _timescaledb_catalog.chunk_id_seq; - ---drop foreign keys on chunk table -ALTER TABLE _timescaledb_catalog.chunk_constraint DROP CONSTRAINT -chunk_constraint_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk_index DROP CONSTRAINT -chunk_index_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk_data_node DROP CONSTRAINT -chunk_data_node_chunk_id_fkey; -ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats DROP CONSTRAINT -bgw_policy_chunk_stats_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT -compression_chunk_size_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT -compression_chunk_size_compressed_chunk_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk_copy_operation DROP CONSTRAINT -chunk_copy_operation_chunk_id_fkey; - ---drop dependent views -DROP VIEW IF EXISTS timescaledb_information.hypertables; -DROP VIEW IF EXISTS timescaledb_information.chunks; -DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size; -DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats; -DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; - -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk; -ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_id_seq; -DROP TABLE _timescaledb_catalog.chunk; - -CREATE SEQUENCE _timescaledb_catalog.chunk_id_seq MINVALUE 1; - --- now create table without self referential foreign key -CREATE TABLE _timescaledb_catalog.chunk ( - id integer NOT NULL DEFAULT nextval('_timescaledb_catalog.chunk_id_seq'), - hypertable_id int NOT NULL, - schema_name name NOT NULL, - table_name name NOT NULL, - compressed_chunk_id integer , - dropped boolean NOT NULL DEFAULT FALSE, - status integer NOT NULL DEFAULT 0, - osm_chunk boolean NOT NULL DEFAULT FALSE, - -- table constraints - CONSTRAINT chunk_pkey PRIMARY KEY (id), - CONSTRAINT chunk_schema_name_table_name_key UNIQUE (schema_name, table_name) -); - -INSERT INTO _timescaledb_catalog.chunk -( id, hypertable_id, schema_name, table_name, - compressed_chunk_id, dropped, status, osm_chunk) -SELECT id, hypertable_id, schema_name, table_name, - compressed_chunk_id, dropped, status, osm_chunk -FROM _timescaledb_internal.chunk_tmp; - ---add indexes to the chunk table -CREATE INDEX chunk_hypertable_id_idx ON _timescaledb_catalog.chunk (hypertable_id); -CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compressed_chunk_id); -CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id); - -ALTER SEQUENCE _timescaledb_catalog.chunk_id_seq OWNED BY _timescaledb_catalog.chunk.id; -SELECT setval('_timescaledb_catalog.chunk_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_chunk_seq_value; - --- add self referential foreign key -ALTER TABLE _timescaledb_catalog.chunk ADD CONSTRAINT chunk_compressed_chunk_id_fkey FOREIGN KEY ( compressed_chunk_id ) - REFERENCES _timescaledb_catalog.chunk( id ); - ---add foreign key constraint -ALTER TABLE _timescaledb_catalog.chunk - ADD CONSTRAINT chunk_hypertable_id_fkey - FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); - -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', ''); -SELECT 
pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_id_seq', '');
-
---add the foreign key constraints
-ALTER TABLE _timescaledb_catalog.chunk_constraint ADD CONSTRAINT
-chunk_constraint_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id);
-ALTER TABLE _timescaledb_catalog.chunk_index ADD CONSTRAINT
-chunk_index_chunk_id_fkey FOREIGN KEY (chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.chunk_data_node ADD CONSTRAINT
-chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id);
-ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats ADD CONSTRAINT
-bgw_policy_chunk_stats_chunk_id_fkey FOREIGN KEY (chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT
-compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT
-compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.chunk_copy_operation ADD CONSTRAINT
-chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE;
-
---cleanup
-DROP TABLE _timescaledb_internal.chunk_tmp;
-DROP TABLE _timescaledb_internal.tmp_chunk_seq_value;
-
-GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC;
-GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC;
-
--- end recreate _timescaledb_catalog.chunk table --
-
-
---
--- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to
--- remove column `numrows_frozen_immediately`
---
-CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp
- AS SELECT * from _timescaledb_catalog.compression_chunk_size;
-
--- Drop dependent views
--- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update
--- (see above)
-
--- Drop table
-ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size;
-DROP TABLE _timescaledb_catalog.compression_chunk_size;
-
-CREATE TABLE _timescaledb_catalog.compression_chunk_size (
- chunk_id integer NOT NULL,
- compressed_chunk_id integer NOT NULL,
- uncompressed_heap_size bigint NOT NULL,
- uncompressed_toast_size bigint NOT NULL,
- uncompressed_index_size bigint NOT NULL,
- compressed_heap_size bigint NOT NULL,
- compressed_toast_size bigint NOT NULL,
- compressed_index_size bigint NOT NULL,
- numrows_pre_compression bigint,
- numrows_post_compression bigint,
- -- table constraints
- CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id),
- CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
- CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
-);
-
-INSERT INTO _timescaledb_catalog.compression_chunk_size
-(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
- uncompressed_index_size, compressed_heap_size, compressed_toast_size,
- compressed_index_size, numrows_pre_compression, numrows_post_compression)
-SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
- uncompressed_index_size, compressed_heap_size, compressed_toast_size,
- 
compressed_index_size, numrows_pre_compression, numrows_post_compression -FROM _timescaledb_internal.compression_chunk_size_tmp; - -DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; - -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); - -GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC; - --- End modify `_timescaledb_catalog.compression_chunk_size` - -DROP FUNCTION @extschema@.drop_chunks(REGCLASS, "any", "any", BOOL, "any", "any"); -CREATE FUNCTION @extschema@.drop_chunks( - relation REGCLASS, - older_than "any" = NULL, - newer_than "any" = NULL, - verbose BOOLEAN = FALSE - ) RETURNS SETOF TEXT AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks' - LANGUAGE C VOLATILE PARALLEL UNSAFE; - -DROP FUNCTION @extschema@.show_chunks(REGCLASS, "any", "any", "any", "any"); -CREATE FUNCTION @extschema@.show_chunks( - relation REGCLASS, - older_than "any" = NULL, - newer_than "any" = NULL - ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks' - LANGUAGE C STABLE PARALLEL SAFE; - -DROP PROCEDURE IF EXISTS _timescaledb_functions.repair_relation_acls(); -DROP FUNCTION IF EXISTS _timescaledb_functions.makeaclitem(regrole, regrole, text, bool); - -DROP FUNCTION IF EXISTS _timescaledb_functions.cagg_validate_query(TEXT); - -DROP FUNCTION @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT, INTERVAL); -CREATE FUNCTION @extschema@.add_retention_policy( - relation REGCLASS, - drop_after "any", - if_not_exists BOOL = false, - schedule_interval INTERVAL = NULL, - initial_start TIMESTAMPTZ = NULL, - timezone TEXT = NULL -) -RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add' -LANGUAGE C VOLATILE; - -DROP FUNCTION @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT, INTERVAL); -CREATE FUNCTION @extschema@.add_compression_policy( - hypertable REGCLASS, - compress_after "any", - if_not_exists BOOL = false, - schedule_interval INTERVAL = NULL, - initial_start TIMESTAMPTZ = NULL, - timezone TEXT = NULL -) -RETURNS INTEGER -AS '@MODULE_PATHNAME@', 'ts_policy_compression_add' -LANGUAGE C VOLATILE; - -DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN, BOOLEAN); -DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN, BOOLEAN); -CREATE PROCEDURE -_timescaledb_functions.policy_compression_execute( - job_id INTEGER, - htid INTEGER, - lag ANYELEMENT, - maxchunks INTEGER, - verbose_log BOOLEAN, - recompress_enabled BOOLEAN) -AS $$ -DECLARE - htoid REGCLASS; - chunk_rec RECORD; - numchunks INTEGER := 1; - _message text; - _detail text; - -- chunk status bits: - bit_compressed int := 1; - bit_compressed_unordered int := 2; - bit_frozen int := 4; - bit_compressed_partial int := 8; -BEGIN - - -- procedures with SET clause cannot execute transaction - -- control so we adjust search_path in procedure body - SET LOCAL search_path TO pg_catalog, pg_temp; - - SELECT format('%I.%I', schema_name, table_name) INTO htoid - FROM _timescaledb_catalog.hypertable - WHERE id = htid; - - -- for the integer cases, we have to compute the lag w.r.t - -- the integer_now function and then pass on to show_chunks - IF pg_typeof(lag) IN ('BIGINT'::regtype, 'INTEGER'::regtype, 'SMALLINT'::regtype) THEN - lag := _timescaledb_functions.subtract_integer_from_now(htoid, lag::BIGINT); - END IF; - - FOR chunk_rec IN - SELECT - show.oid, 
ch.schema_name, ch.table_name, ch.status - FROM - @extschema@.show_chunks(htoid, older_than => lag) AS show(oid) - INNER JOIN pg_class pgc ON pgc.oid = show.oid - INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid - INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid - WHERE - ch.dropped IS FALSE - AND ( - ch.status = 0 OR - ( - ch.status & bit_compressed > 0 AND ( - ch.status & bit_compressed_unordered > 0 OR - ch.status & bit_compressed_partial > 0 - ) - ) - ) - LOOP - IF chunk_rec.status = 0 THEN - BEGIN - PERFORM @extschema@.compress_chunk( chunk_rec.oid ); - EXCEPTION WHEN OTHERS THEN - GET STACKED DIAGNOSTICS - _message = MESSAGE_TEXT, - _detail = PG_EXCEPTION_DETAIL; - RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text - USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), - ERRCODE = sqlstate; - END; - ELSIF - ( - chunk_rec.status & bit_compressed > 0 AND ( - chunk_rec.status & bit_compressed_unordered > 0 OR - chunk_rec.status & bit_compressed_partial > 0 - ) - ) AND recompress_enabled IS TRUE THEN - BEGIN - PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true); - EXCEPTION WHEN OTHERS THEN - RAISE WARNING 'decompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text - USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), - ERRCODE = sqlstate; - END; - -- SET LOCAL is only active until end of transaction. - -- While we could use SET at the start of the function we do not - -- want to bleed out search_path to caller, so we do SET LOCAL - -- again after COMMIT - BEGIN - PERFORM @extschema@.compress_chunk(chunk_rec.oid); - EXCEPTION WHEN OTHERS THEN - RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text - USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail), - ERRCODE = sqlstate; - END; - END IF; - COMMIT; - -- SET LOCAL is only active until end of transaction. 
- -- While we could use SET at the start of the function we do not
- -- want to bleed out search_path to caller, so we do SET LOCAL
- -- again after COMMIT
- SET LOCAL search_path TO pg_catalog, pg_temp;
- IF verbose_log THEN
- RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name;
- END IF;
- numchunks := numchunks + 1;
- IF maxchunks > 0 AND numchunks >= maxchunks THEN
- EXIT;
- END IF;
- END LOOP;
-END;
-$$ LANGUAGE PLPGSQL;
-
-DROP FUNCTION _timescaledb_functions.chunk_constraint_add_table_constraint(
- chunk_constraint_row _timescaledb_catalog.chunk_constraint
-);
-
-CREATE FUNCTION _timescaledb_functions.chunk_constraint_add_table_constraint(
- chunk_constraint_row _timescaledb_catalog.chunk_constraint
-)
- RETURNS VOID LANGUAGE PLPGSQL AS
-$BODY$
-DECLARE
- chunk_row _timescaledb_catalog.chunk;
- hypertable_row _timescaledb_catalog.hypertable;
- constraint_oid OID;
- constraint_type CHAR;
- check_sql TEXT;
- def TEXT;
- indx_tablespace NAME;
- tablespace_def TEXT;
-BEGIN
- SELECT * INTO STRICT chunk_row FROM _timescaledb_catalog.chunk c WHERE c.id = chunk_constraint_row.chunk_id;
- SELECT * INTO STRICT hypertable_row FROM _timescaledb_catalog.hypertable h WHERE h.id = chunk_row.hypertable_id;
-
- IF chunk_constraint_row.dimension_slice_id IS NOT NULL THEN
- RAISE 'cannot create dimension constraint %', chunk_constraint_row;
- ELSIF chunk_constraint_row.hypertable_constraint_name IS NOT NULL THEN
-
- SELECT oid, contype INTO STRICT constraint_oid, constraint_type FROM pg_constraint
- WHERE conname=chunk_constraint_row.hypertable_constraint_name AND
- conrelid = format('%I.%I', hypertable_row.schema_name, hypertable_row.table_name)::regclass::oid;
-
- IF constraint_type IN ('p','u') THEN
- -- since primary keys and unique constraints are backed by an index
- -- they might have an index tablespace assigned
- -- the tablespace is not part of the constraint definition so
- -- we have to append it explicitly to preserve it
- SELECT T.spcname INTO indx_tablespace
- FROM pg_constraint C, pg_class I, pg_tablespace T
- WHERE C.oid = constraint_oid AND C.contype IN ('p', 'u') AND I.oid = C.conindid AND I.reltablespace = T.oid;
-
- def := pg_get_constraintdef(constraint_oid);
-
- IF indx_tablespace IS NOT NULL THEN
- def := format('%s USING INDEX TABLESPACE %I', def, indx_tablespace);
- END IF;
-
- ELSIF constraint_type = 't' THEN
- -- constraint triggers are copied separately with normal triggers
- def := NULL;
- ELSE
- def := pg_get_constraintdef(constraint_oid);
- END IF;
-
- ELSE
- RAISE 'unknown constraint type';
- END IF;
-
- IF def IS NOT NULL THEN
- -- to allow for custom types with operators outside of pg_catalog
- -- we set search_path to @extschema@
- SET LOCAL search_path TO @extschema@, pg_temp;
- EXECUTE pg_catalog.format(
- $$ ALTER TABLE %I.%I ADD CONSTRAINT %I %s $$,
- chunk_row.schema_name, chunk_row.table_name, chunk_constraint_row.constraint_name, def
- );
-
- END IF;
-END
-$BODY$ SET search_path TO pg_catalog, pg_temp;
diff --git a/version.config b/version.config
index bfaac2bac53..5e2b81a77b2 100644
--- a/version.config
+++ b/version.config
@@ -1,3 +1,3 @@
-version = 2.13.0-dev
+version = 2.13.0
 update_from_version = 2.12.2
 downgrade_to_version = 2.12.2
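
Note on the chunk-status test used by policy_compression_execute in both
versions of the script above: a chunk qualifies when it is uncompressed
(status = 0) or compressed but flagged unordered or partial. A minimal
standalone sketch of the same bit test follows; the sample status values are
illustrative only, not taken from this changeset.

    -- status bits, as declared in the procedure: 1 = compressed,
    -- 2 = compressed unordered, 4 = frozen, 8 = compressed partial
    SELECT s AS status,
           (s = 0) OR (s & 1 > 0 AND (s & 2 > 0 OR s & 8 > 0)) AS picked_by_policy
    FROM (VALUES (0), (1), (3), (9)) AS t(s);
    -- 0 -> true (plain compress), 1 -> false (already fully compressed),
    -- 3 -> true (unordered, recompress), 9 -> true (partial, recompress)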
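
Note on the tablespace handling in chunk_constraint_add_table_constraint
above: pg_get_constraintdef() omits the tablespace of the index backing a
PRIMARY KEY or UNIQUE constraint, so the function looks it up via
pg_constraint.conindid and appends USING INDEX TABLESPACE by hand. A
standalone query in the same spirit (the table name my_table is
hypothetical):

    SELECT c.conname,
           pg_get_constraintdef(c.oid) AS definition,
           t.spcname AS index_tablespace -- NULL when the index is in the default tablespace
    FROM pg_constraint c
    JOIN pg_class i ON i.oid = c.conindid -- backing index of the constraint
    LEFT JOIN pg_tablespace t ON t.oid = i.reltablespace
    WHERE c.contype IN ('p', 'u')
      AND c.conrelid = 'my_table'::regclass;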