diff --git a/.unreleased/PR_6222 b/.unreleased/PR_6222
deleted file mode 100644
index bee0aeb86ae..00000000000
--- a/.unreleased/PR_6222
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6222 Allow enabling compression on hypertable with unique expression index
diff --git a/.unreleased/enhancement_6049 b/.unreleased/enhancement_6049
deleted file mode 100644
index 6b77fe68b1f..00000000000
--- a/.unreleased/enhancement_6049
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #6130 Add CI check for incorrect catalog updates
diff --git a/.unreleased/feature_5575 b/.unreleased/feature_5575
deleted file mode 100644
index 5b573dfed4e..00000000000
--- a/.unreleased/feature_5575
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #5575 Add chunk-wise sorted paths for compressed chunks
diff --git a/.unreleased/feature_5761 b/.unreleased/feature_5761
deleted file mode 100644
index 2d5196033f7..00000000000
--- a/.unreleased/feature_5761
+++ /dev/null
@@ -1,2 +0,0 @@
-Implements: #5761 Simplify hypertable DDL API
-Thanks: @pdipesh02 for contributing to the implementation of the generalized hypertable API
diff --git a/.unreleased/feature_5890 b/.unreleased/feature_5890
deleted file mode 100644
index b8ac85886dc..00000000000
--- a/.unreleased/feature_5890
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #5890 Reduce WAL activity by freezing compressed tuples immediately
diff --git a/.unreleased/feature_6050 b/.unreleased/feature_6050
deleted file mode 100644
index 60cbc49f6e5..00000000000
--- a/.unreleased/feature_6050
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #6050 Vectorized aggregation execution for sum()
diff --git a/.unreleased/feature_6062 b/.unreleased/feature_6062
deleted file mode 100644
index a0aa51e85b4..00000000000
--- a/.unreleased/feature_6062
+++ /dev/null
@@ -1,2 +0,0 @@
-Implements: #6062 Add metadata for chunk creation time
-Thanks: @pdipesh02 for contributing to the implementation of this feature
diff --git a/.unreleased/feature_6077 b/.unreleased/feature_6077
deleted file mode 100644
index 34a4273a507..00000000000
--- a/.unreleased/feature_6077
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #6077 Make Continous Aggregates materialized only (non-realtime) by default
diff --git a/.unreleased/feature_6177 b/.unreleased/feature_6177
deleted file mode 100644
index 79d6e4fc0dc..00000000000
--- a/.unreleased/feature_6177
+++ /dev/null
@@ -1,2 +0,0 @@
-Implements: #6177 Change show_chunks/drop_chunks using chunk creation time
-Thanks: @pdipesh02 for contributing to the implementation of this feature
diff --git a/.unreleased/feature_6227 b/.unreleased/feature_6227
deleted file mode 100644
index 39ba8386bee..00000000000
--- a/.unreleased/feature_6227
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #6227 Use creation time in retention/compression policy
diff --git a/.unreleased/feature_6307 b/.unreleased/feature_6307
deleted file mode 100644
index 3d2381cfac1..00000000000
--- a/.unreleased/feature_6307
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #6307 Add SQL function cagg_validate_query
diff --git a/.unreleased/fix_6188 b/.unreleased/fix_6188
deleted file mode 100644
index 1a003a61e5b..00000000000
--- a/.unreleased/fix_6188
+++ /dev/null
@@ -1,2 +0,0 @@
-Fixes: #6188 Add GUC for setting background worker log level
-Thanks: @jflambert for reporting the issue
diff --git a/.unreleased/fix_6240 b/.unreleased/fix_6240
deleted file mode 100644
index 2d9d2e4465f..00000000000
--- a/.unreleased/fix_6240
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6240 Check if worker registration succeeded
diff --git a/.unreleased/fix_6289 b/.unreleased/fix_6289
deleted file mode 100644
index 6664512de6d..00000000000
--- a/.unreleased/fix_6289
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6289 Add support for startup chunk exclusion with aggs
diff --git a/.unreleased/fix_6290 b/.unreleased/fix_6290
deleted file mode 100644
index a1ae57435cf..00000000000
--- a/.unreleased/fix_6290
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6290 Repair relacl on upgrade
diff --git a/.unreleased/fix_6305 b/.unreleased/fix_6305
deleted file mode 100644
index 2cb7ae0804a..00000000000
--- a/.unreleased/fix_6305
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6305 Make _timescaledb_functions.makeaclitem strict
diff --git a/.unreleased/fix_partial_index b/.unreleased/fix_partial_index
deleted file mode 100644
index 5cb091f3338..00000000000
--- a/.unreleased/fix_partial_index
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6280 Potential data loss when compressing a table with a partial index that matches compression order.
diff --git a/.unreleased/pr_6178 b/.unreleased/pr_6178
deleted file mode 100644
index 15e1037f6bf..00000000000
--- a/.unreleased/pr_6178
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #6178 Show batches/tuples decompressed during DML operations in EXPLAIN output
diff --git a/.unreleased/pr_6185 b/.unreleased/pr_6185
deleted file mode 100644
index 0f47652d542..00000000000
--- a/.unreleased/pr_6185
+++ /dev/null
@@ -1 +0,0 @@
-Implements: #6185 Keep track of catalog version
diff --git a/.unreleased/pr_6254 b/.unreleased/pr_6254
deleted file mode 100644
index 98d634dc65a..00000000000
--- a/.unreleased/pr_6254
+++ /dev/null
@@ -1,3 +0,0 @@
-Fixes: #6254 Fix exception detail passing in compression_policy_execute
-
-Thanks: @fetchezar for reporting an issue with compression policy error messages
diff --git a/.unreleased/pr_6264 b/.unreleased/pr_6264
deleted file mode 100644
index 98c5408d1f4..00000000000
--- a/.unreleased/pr_6264
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6264 Fix missing bms_del_member result assignment
diff --git a/.unreleased/pr_6275 b/.unreleased/pr_6275
deleted file mode 100644
index cf49ffeb62d..00000000000
--- a/.unreleased/pr_6275
+++ /dev/null
@@ -1,3 +0,0 @@
-Fixes: #6275 Fix negative bitmapset member not allowed in compression
-
-Thanks: @torazem for reporting an issue with compression and large oids
diff --git a/.unreleased/pr_6297 b/.unreleased/pr_6297
deleted file mode 100644
index 98027449fdf..00000000000
--- a/.unreleased/pr_6297
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6297 Fix segfault when creating a cagg using a NULL width in time bucket function
diff --git a/.unreleased/pr_6332 b/.unreleased/pr_6332
deleted file mode 100644
index 69841904ddb..00000000000
--- a/.unreleased/pr_6332
+++ /dev/null
@@ -1 +0,0 @@
-Fixes: #6332 Fix typmod and collation for segmentby columns
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1f9a309cb7a..697c959e5af 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,68 @@
`psql` with the `-X` flag to prevent any `.psqlrc` commands from
accidentally triggering the load of a previous DB version.**
+## 2.13.0 (2023-11-27)
+
+This release contains performance improvements, an improved hypertable DDL API
+and bug fixes since the 2.12.2 release. We recommend that you upgrade at the next
+available opportunity.
+
+In addition, it includes these noteworthy features:
+
+* Full PostgreSQL 16 support for all existing features
+* Vectorized aggregation execution for sum()
+* Track chunk creation time used in retention/compression policies
+
+**Deprecation notice: Multi-node support**
+
+TimescaleDB 2.13 is the last version that will include multi-node support. Multi-node
+support in 2.13 is available for PostgreSQL 13, 14 and 15. Learn more about it
+[here](docs/MultiNodeDeprecation.md).
+
+If you want to migrate from multi-node TimescaleDB to single-node TimescaleDB, read the
+[migration documentation](https://docs.timescale.com/migrate/latest/multi-node-to-timescale-service/).
+
+**Starting from TimescaleDB 2.13.0:**
+
+* No Amazon Machine Images (AMIs) are published. If you previously used an AMI, please
+use another [installation method](https://docs.timescale.com/self-hosted/latest/install/)
+* Continuous Aggregates are materialized only (non-realtime) by default
+
+**Features**
+* #5575 Add chunk-wise sorted paths for compressed chunks
+* #5761 Simplify hypertable DDL API
+* #5890 Reduce WAL activity by freezing compressed tuples immediately
+* #6050 Vectorized aggregation execution for sum()
+* #6062 Add metadata for chunk creation time
+* #6077 Make Continuous Aggregates materialized only (non-realtime) by default
+* #6177 Change show_chunks/drop_chunks using chunk creation time
+* #6178 Show batches/tuples decompressed during DML operations in EXPLAIN output
+* #6185 Keep track of catalog version
+* #6227 Use creation time in retention/compression policy
+* #6307 Add SQL function cagg_validate_query
+
+**Bugfixes**
+* #6188 Add GUC for setting background worker log level
+* #6222 Allow enabling compression on hypertable with unique expression index
+* #6240 Check if worker registration succeeded
+* #6254 Fix exception detail passing in compression_policy_execute
+* #6264 Fix missing bms_del_member result assignment
+* #6275 Fix negative bitmapset member not allowed in compression
+* #6280 Potential data loss when compressing a table with a partial index that matches compression order
+* #6289 Add support for startup chunk exclusion with aggs
+* #6290 Repair relacl on upgrade
+* #6297 Fix segfault when creating a cagg using a NULL width in time bucket function
+* #6305 Make _timescaledb_functions.makeaclitem strict
+* #6332 Fix typmod and collation for segmentby columns
+
+**Thanks**
+* @fetchezar for reporting an issue with compression policy error messages
+* @jflambert for reporting the background worker log level issue
+* @torazem for reporting an issue with compression and large oids
+* @pdipesh02 for contributing to the implementation of the metadata for chunk creation time,
+ the generalized hypertable API, and show_chunks/drop_chunks using chunk creation time
+* @lkshminarayanan for all his work on PG16 support
+
## 2.12.2 (2023-10-19)
This release contains bug fixes since the 2.12.1 release.
@@ -24,6 +86,7 @@ We recommend that you upgrade at the next available opportunity.
* #6117 Avoid decompressing batches using an empty slot
* #6123 Fix concurrency errors in OSM API
* #6142 do not throw an error when deprecation GUC cannot be read
+
**Thanks**
* @symbx for reporting a crash when selecting from empty hypertables
diff --git a/docs/MultiNodeDeprecation.md b/docs/MultiNodeDeprecation.md
new file mode 100644
index 00000000000..b4522665782
--- /dev/null
+++ b/docs/MultiNodeDeprecation.md
@@ -0,0 +1,59 @@
+## Multi-node Deprecation
+
+Multi-node support has been deprecated.
+TimescaleDB 2.13 is the last version that will include multi-node support.
+Multi-node support in 2.13 is available for PostgreSQL 13, 14 and 15.
+
+If you want to migrate from multi-node TimescaleDB to single-node TimescaleDB,
+read the [migration documentation](https://docs.timescale.com/migrate/latest/multi-node-to-timescale-service/).
+
+### Why we have deprecated multi-node support
+
+We began work on multi-node support in 2018 and released the first version in 2020 to provide higher scalability
+for TimescaleDB deployments. Since then, as we’ve continued to evolve our community and cloud products,
+we’ve come to realize that multi-node was not the most forward-looking approach,
+and we have instead turned our focus to more cloud-native designs that inherently leverage compute/storage separation for higher scalability.
+
+These are the challenges we encountered with our multi-node architecture:
+- Lower development speed
+
+ Multi-node was hard to maintain and evolve and added a very high tax on any new feature we developed,
+ significantly slowing down our development speed.
+ Additionally, only ~1% of TimescaleDB deployments use multi-node.
+
+- Inconsistent developer experience
+
+ Multi-node imposed a key initial decision on developers adopting TimescaleDB: start with single-node or start with multi-node.
+ Moving from one to the other required a migration.
+ On top of that, there were a number of features supported on single-node that were not available on multi-node, making
+ the experience inconsistent and the choice even more complicated.
+
+- Expensive price / performance ratio
+
+ We were very far from linear scalability, not least due to the natural “performance hit” when scaling from a single node to distributed transactions.
+ For example, handling 2x the load a single node could handle required 8 servers.
+ At the same time, we’ve been able to dramatically improve the performance of single-node to handle 2 million inserts
+ per second, as well as improve query performance with a
+ number of [query optimizations](https://www.timescale.com/blog/8-performance-improvements-in-recent-timescaledb-releases-for-faster-query-analytics/)
+ and [vectorized query execution](https://www.timescale.com/blog/teaching-postgres-new-tricks-simd-vectorization-for-faster-analytical-queries/).
+
+- Not designed to leverage new cloud architectures
+
+ As we’ve gained experience developing and scaling cloud solutions, we’ve realized that we can solve
+ the same problems more easily through more modern approaches that leverage cloud technologies,
+ which often separate compute and storage to scale them independently.
+ For example, we’re now leveraging object storage in our cloud offering to deliver virtually infinite
+ storage capacity at a very low cost with [Tiered Storage](https://www.timescale.com/blog/scaling-postgresql-for-cheap-introducing-tiered-storage-in-timescale/),
+ and our [columnar compression](https://www.timescale.com/blog/building-columnar-compression-in-a-row-oriented-database/) (built concurrently with multi-node)
+ offers effective per-disk capacity that’s 10-20x that of traditional Postgres.
+ Similarly, users can scale compute both vertically and horizontally by either dynamically resizing compute
+ allocated to cloud databases, or adding additional server replicas (each of which can use the same tiered storage).
+
+Given all those reasons, we’ve made the difficult decision to deprecate multi-node so we can accelerate feature development
+and performance improvements, and deliver a better developer experience for the 99% of our community that is not using multi-node.
+
+### Questions and feedback
+
+We understand the news will be disappointing to users of multi-node. We’d like to help and provide advice on the best path forward for you.
+
+If you have any questions or feedback, you can share them in the #multi-node channel in our [community Slack](https://slack.timescale.com/).
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index e6ec1121719..e543061c872 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -40,7 +40,8 @@ set(MOD_FILES
updates/2.11.1--2.11.2.sql
updates/2.11.2--2.12.0.sql
updates/2.12.0--2.12.1.sql
- updates/2.12.1--2.12.2.sql)
+ updates/2.12.1--2.12.2.sql
+ updates/2.12.2--2.13.0.sql)
# The downgrade file to generate a downgrade script for the current version, as
# specified in version.config
diff --git a/sql/updates/2.12.2--2.13.0.sql b/sql/updates/2.12.2--2.13.0.sql
new file mode 100644
index 00000000000..a99b15de009
--- /dev/null
+++ b/sql/updates/2.12.2--2.13.0.sql
@@ -0,0 +1,444 @@
+CREATE TYPE _timescaledb_internal.dimension_info;
+
+CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring)
+ RETURNS _timescaledb_internal.dimension_info
+ LANGUAGE C STRICT IMMUTABLE
+ AS '@MODULE_PATHNAME@', 'ts_dimension_info_in';
+
+CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info)
+ RETURNS cstring
+ LANGUAGE C STRICT IMMUTABLE
+ AS '@MODULE_PATHNAME@', 'ts_dimension_info_out';
+
+CREATE TYPE _timescaledb_internal.dimension_info (
+ INPUT = _timescaledb_functions.dimension_info_in,
+ OUTPUT = _timescaledb_functions.dimension_info_out,
+ INTERNALLENGTH = VARIABLE
+);
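+
+-- Note: the shell type declared first lets the in/out functions reference
+-- it before it is fully defined; the CREATE TYPE above then completes the
+-- definition. This is the standard PostgreSQL pattern for custom types
+-- whose I/O functions are implemented in C.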
+
+CREATE FUNCTION @extschema@.create_hypertable(
+ relation REGCLASS,
+ dimension _timescaledb_internal.dimension_info,
+ create_default_indexes BOOLEAN = TRUE,
+ if_not_exists BOOLEAN = FALSE,
+ migrate_data BOOLEAN = FALSE
+) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE;
+
+CREATE FUNCTION @extschema@.add_dimension(
+ hypertable REGCLASS,
+ dimension _timescaledb_internal.dimension_info,
+ if_not_exists BOOLEAN = FALSE
+) RETURNS TABLE(dimension_id INT, created BOOL)
+AS '@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE;
+
+CREATE FUNCTION @extschema@.set_partitioning_interval(
+ hypertable REGCLASS,
+ partition_interval ANYELEMENT,
+ dimension_name NAME = NULL
+) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE;
+
+CREATE FUNCTION @extschema@.by_hash(column_name NAME, number_partitions INTEGER,
+ partition_func regproc = NULL)
+ RETURNS _timescaledb_internal.dimension_info LANGUAGE C
+ AS '@MODULE_PATHNAME@', 'ts_hash_dimension';
+
+CREATE FUNCTION @extschema@.by_range(column_name NAME,
+ partition_interval ANYELEMENT = NULL::bigint,
+ partition_func regproc = NULL)
+ RETURNS _timescaledb_internal.dimension_info LANGUAGE C
+ AS '@MODULE_PATHNAME@', 'ts_range_dimension';
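+
+-- Illustrative usage of the generalized DDL API defined above (not executed
+-- by this update; table and column names are hypothetical):
+--
+--   SELECT * FROM create_hypertable('conditions', by_range('time', INTERVAL '1 week'));
+--   SELECT * FROM add_dimension('conditions', by_hash('device_id', number_partitions => 4));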
+
+--
+-- Rebuild the catalog table `_timescaledb_catalog.chunk` to
+-- add new column `creation_time`
+--
+CREATE TABLE _timescaledb_internal.chunk_tmp
+AS SELECT * from _timescaledb_catalog.chunk;
+
+CREATE TABLE _timescaledb_internal.tmp_chunk_seq_value AS
+SELECT last_value, is_called FROM _timescaledb_catalog.chunk_id_seq;
+
+--drop foreign keys on chunk table
+ALTER TABLE _timescaledb_catalog.chunk_constraint DROP CONSTRAINT
+chunk_constraint_chunk_id_fkey;
+ALTER TABLE _timescaledb_catalog.chunk_index DROP CONSTRAINT
+chunk_index_chunk_id_fkey;
+ALTER TABLE _timescaledb_catalog.chunk_data_node DROP CONSTRAINT
+chunk_data_node_chunk_id_fkey;
+ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats DROP CONSTRAINT
+bgw_policy_chunk_stats_chunk_id_fkey;
+ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT
+compression_chunk_size_chunk_id_fkey;
+ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT
+compression_chunk_size_compressed_chunk_id_fkey;
+ALTER TABLE _timescaledb_catalog.chunk_copy_operation DROP CONSTRAINT
+chunk_copy_operation_chunk_id_fkey;
+
+--drop dependent views
+DROP VIEW IF EXISTS timescaledb_information.hypertables;
+DROP VIEW IF EXISTS timescaledb_information.chunks;
+DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size;
+DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats;
+DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status;
+
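+-- Extension member objects cannot be dropped directly, so first detach the
+-- chunk table and its sequence from the extension; the rebuilt table is
+-- registered for dumps again via pg_extension_config_dump below.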
+ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk;
+ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_id_seq;
+DROP TABLE _timescaledb_catalog.chunk;
+
+CREATE SEQUENCE _timescaledb_catalog.chunk_id_seq MINVALUE 1;
+
+-- now create table without self referential foreign key
+CREATE TABLE _timescaledb_catalog.chunk (
+ id integer NOT NULL DEFAULT nextval('_timescaledb_catalog.chunk_id_seq'),
+ hypertable_id int NOT NULL,
+ schema_name name NOT NULL,
+ table_name name NOT NULL,
+ compressed_chunk_id integer ,
+ dropped boolean NOT NULL DEFAULT FALSE,
+ status integer NOT NULL DEFAULT 0,
+ osm_chunk boolean NOT NULL DEFAULT FALSE,
+ creation_time timestamptz,
+ -- table constraints
+ CONSTRAINT chunk_pkey PRIMARY KEY (id),
+ CONSTRAINT chunk_schema_name_table_name_key UNIQUE (schema_name, table_name)
+);
+
+INSERT INTO _timescaledb_catalog.chunk
+( id, hypertable_id, schema_name, table_name,
+ compressed_chunk_id, dropped, status, osm_chunk)
+SELECT id, hypertable_id, schema_name, table_name,
+ compressed_chunk_id, dropped, status, osm_chunk
+FROM _timescaledb_internal.chunk_tmp;
+
+-- update creation_time for chunks
+UPDATE
+ _timescaledb_catalog.chunk c
+SET
+ creation_time = (pg_catalog.pg_stat_file(pg_catalog.pg_relation_filepath(r.oid))).modification
+FROM
+ pg_class r, pg_namespace n
+WHERE
+ r.relnamespace = n.oid
+ AND r.relname = c.table_name
+ AND n.nspname = c.schema_name
+ AND r.relkind = 'r'
+ AND c.dropped IS FALSE;
+
+-- Make sure that there are no records with an empty creation time
+-- (e.g. dropped chunks, which the backfill above skips)
+UPDATE _timescaledb_catalog.chunk SET creation_time = now() WHERE creation_time IS NULL;
+
+--add indexes to the chunk table
+CREATE INDEX chunk_hypertable_id_idx ON _timescaledb_catalog.chunk (hypertable_id);
+CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compressed_chunk_id);
+CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id);
+CREATE INDEX chunk_hypertable_id_creation_time_idx ON _timescaledb_catalog.chunk(hypertable_id, creation_time);
+
+ALTER SEQUENCE _timescaledb_catalog.chunk_id_seq OWNED BY _timescaledb_catalog.chunk.id;
+SELECT setval('_timescaledb_catalog.chunk_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_chunk_seq_value;
+
+-- add self referential foreign key
+ALTER TABLE _timescaledb_catalog.chunk ADD CONSTRAINT chunk_compressed_chunk_id_fkey FOREIGN KEY ( compressed_chunk_id )
+ REFERENCES _timescaledb_catalog.chunk( id );
+
+--add foreign key constraint
+ALTER TABLE _timescaledb_catalog.chunk
+ ADD CONSTRAINT chunk_hypertable_id_fkey
+ FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id);
+
+SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', '');
+SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_id_seq', '');
+
+-- Add non-null constraint
+ALTER TABLE _timescaledb_catalog.chunk
+ ALTER COLUMN creation_time SET NOT NULL;
+
+--add the foreign key constraints
+ALTER TABLE _timescaledb_catalog.chunk_constraint ADD CONSTRAINT
+chunk_constraint_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id);
+ALTER TABLE _timescaledb_catalog.chunk_index ADD CONSTRAINT
+chunk_index_chunk_id_fkey FOREIGN KEY (chunk_id)
+REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
+ALTER TABLE _timescaledb_catalog.chunk_data_node ADD CONSTRAINT
+chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id);
+ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats ADD CONSTRAINT
+bgw_policy_chunk_stats_chunk_id_fkey FOREIGN KEY (chunk_id)
+REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
+ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT
+compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id)
+REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
+ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT
+compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id)
+REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
+ALTER TABLE _timescaledb_catalog.chunk_copy_operation ADD CONSTRAINT
+chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE;
+
+--cleanup
+DROP TABLE _timescaledb_internal.chunk_tmp;
+DROP TABLE _timescaledb_internal.tmp_chunk_seq_value;
+
+GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC;
+GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC;
+-- end recreate _timescaledb_catalog.chunk table --
+
+--
+-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to
+-- add new column `numrows_frozen_immediately`
+--
+CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp
+ AS SELECT * from _timescaledb_catalog.compression_chunk_size;
+
+-- Drop dependent views
+-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update
+-- (see above)
+
+-- Drop table
+ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size;
+DROP TABLE _timescaledb_catalog.compression_chunk_size;
+
+CREATE TABLE _timescaledb_catalog.compression_chunk_size (
+ chunk_id integer NOT NULL,
+ compressed_chunk_id integer NOT NULL,
+ uncompressed_heap_size bigint NOT NULL,
+ uncompressed_toast_size bigint NOT NULL,
+ uncompressed_index_size bigint NOT NULL,
+ compressed_heap_size bigint NOT NULL,
+ compressed_toast_size bigint NOT NULL,
+ compressed_index_size bigint NOT NULL,
+ numrows_pre_compression bigint,
+ numrows_post_compression bigint,
+ numrows_frozen_immediately bigint,
+ -- table constraints
+ CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id),
+ CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
+ CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
+);
+
+INSERT INTO _timescaledb_catalog.compression_chunk_size
+(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
+ uncompressed_index_size, compressed_heap_size, compressed_toast_size,
+ compressed_index_size, numrows_pre_compression, numrows_post_compression, numrows_frozen_immediately)
+SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
+ uncompressed_index_size, compressed_heap_size, compressed_toast_size,
+ compressed_index_size, numrows_pre_compression, numrows_post_compression, 0
+FROM _timescaledb_internal.compression_chunk_size_tmp;
+
+DROP TABLE _timescaledb_internal.compression_chunk_size_tmp;
+
+SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', '');
+
+GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC;
+
+-- End modify `_timescaledb_catalog.compression_chunk_size`
+
+DROP FUNCTION @extschema@.drop_chunks(REGCLASS, "any", "any", BOOL);
+CREATE FUNCTION @extschema@.drop_chunks(
+ relation REGCLASS,
+ older_than "any" = NULL,
+ newer_than "any" = NULL,
+ verbose BOOLEAN = FALSE,
+ created_before "any" = NULL,
+ created_after "any" = NULL
+ ) RETURNS SETOF TEXT AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks'
+ LANGUAGE C VOLATILE PARALLEL UNSAFE;
+
+DROP FUNCTION @extschema@.show_chunks(REGCLASS, "any", "any");
+CREATE FUNCTION @extschema@.show_chunks(
+ relation REGCLASS,
+ older_than "any" = NULL,
+ newer_than "any" = NULL,
+ created_before "any" = NULL,
+ created_after "any" = NULL
+ ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks'
+ LANGUAGE C STABLE PARALLEL SAFE;
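+
+-- Illustrative (hypothetical table name): the new arguments select chunks by
+-- chunk creation time rather than by the time dimension, e.g.
+--
+--   SELECT show_chunks('conditions', created_before => now() - INTERVAL '3 months');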
+
+DROP FUNCTION @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT);
+CREATE FUNCTION @extschema@.add_retention_policy(
+ relation REGCLASS,
+ drop_after "any" = NULL,
+ if_not_exists BOOL = false,
+ schedule_interval INTERVAL = NULL,
+ initial_start TIMESTAMPTZ = NULL,
+ timezone TEXT = NULL,
+ drop_created_before INTERVAL = NULL
+)
+RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add'
+LANGUAGE C VOLATILE;
+
+DROP FUNCTION @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT);
+CREATE FUNCTION @extschema@.add_compression_policy(
+ hypertable REGCLASS,
+ compress_after "any" = NULL,
+ if_not_exists BOOL = false,
+ schedule_interval INTERVAL = NULL,
+ initial_start TIMESTAMPTZ = NULL,
+ timezone TEXT = NULL,
+ compress_created_before INTERVAL = NULL
+)
+RETURNS INTEGER
+AS '@MODULE_PATHNAME@', 'ts_policy_compression_add'
+LANGUAGE C VOLATILE;
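+
+-- Illustrative (hypothetical table name): the new INTERVAL arguments let
+-- policies act on chunk creation time instead of the time dimension, e.g.
+--
+--   SELECT add_retention_policy('conditions', drop_created_before => INTERVAL '6 months');
+--   SELECT add_compression_policy('conditions', compress_created_before => INTERVAL '7 days');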
+
+DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN);
+DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN);
+CREATE PROCEDURE
+_timescaledb_functions.policy_compression_execute(
+ job_id INTEGER,
+ htid INTEGER,
+ lag ANYELEMENT,
+ maxchunks INTEGER,
+ verbose_log BOOLEAN,
+ recompress_enabled BOOLEAN,
+ use_creation_time BOOLEAN)
+AS $$
+DECLARE
+ htoid REGCLASS;
+ chunk_rec RECORD;
+ numchunks INTEGER := 1;
+ _message text;
+ _detail text;
+ -- chunk status bits:
+ bit_compressed int := 1;
+ bit_compressed_unordered int := 2;
+ bit_frozen int := 4;
+ bit_compressed_partial int := 8;
+ creation_lag INTERVAL := NULL;
+BEGIN
+
+ -- procedures with a SET clause cannot execute transaction
+ -- control, so we adjust search_path in the procedure body
+ SET LOCAL search_path TO pg_catalog, pg_temp;
+
+ SELECT format('%I.%I', schema_name, table_name) INTO htoid
+ FROM _timescaledb_catalog.hypertable
+ WHERE id = htid;
+
+ -- for the integer cases, we have to compute the lag w.r.t.
+ -- the integer_now function and then pass it on to show_chunks
+ IF pg_typeof(lag) IN ('BIGINT'::regtype, 'INTEGER'::regtype, 'SMALLINT'::regtype) THEN
+ -- cannot have use_creation_time set with this
+ IF use_creation_time IS TRUE THEN
+ RAISE EXCEPTION 'job % cannot use creation time with integer_now function', job_id;
+ END IF;
+ lag := _timescaledb_functions.subtract_integer_from_now(htoid, lag::BIGINT);
+ END IF;
+
+ -- if use_creation_time has been specified then the lag needs to be used with the
+ -- "compress_created_before" argument. Otherwise the usual "older_than" argument
+ -- is good enough
+ IF use_creation_time IS TRUE THEN
+ creation_lag := lag;
+ lag := NULL;
+ END IF;
+
+ FOR chunk_rec IN
+ SELECT
+ show.oid, ch.schema_name, ch.table_name, ch.status
+ FROM
+ @extschema@.show_chunks(htoid, older_than => lag, created_before => creation_lag) AS show(oid)
+ INNER JOIN pg_class pgc ON pgc.oid = show.oid
+ INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid
+ INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid
+ WHERE
+ ch.dropped IS FALSE
+ AND (
+ ch.status = 0 OR
+ (
+ ch.status & bit_compressed > 0 AND (
+ ch.status & bit_compressed_unordered > 0 OR
+ ch.status & bit_compressed_partial > 0
+ )
+ )
+ )
+ LOOP
+ IF chunk_rec.status = 0 THEN
+ BEGIN
+ PERFORM @extschema@.compress_chunk( chunk_rec.oid );
+ EXCEPTION WHEN OTHERS THEN
+ GET STACKED DIAGNOSTICS
+ _message = MESSAGE_TEXT,
+ _detail = PG_EXCEPTION_DETAIL;
+ RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text
+ USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail),
+ ERRCODE = sqlstate;
+ END;
+ ELSIF
+ (
+ chunk_rec.status & bit_compressed > 0 AND (
+ chunk_rec.status & bit_compressed_unordered > 0 OR
+ chunk_rec.status & bit_compressed_partial > 0
+ )
+ ) AND recompress_enabled IS TRUE THEN
+ BEGIN
+ PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true);
+ EXCEPTION WHEN OTHERS THEN
+ RAISE WARNING 'decompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text
+ USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail),
+ ERRCODE = sqlstate;
+ END;
+ -- SET LOCAL is only active until end of transaction.
+ -- While we could use SET at the start of the function we do not
+ -- want to bleed out search_path to caller, so we do SET LOCAL
+ -- again after COMMIT
+ BEGIN
+ PERFORM @extschema@.compress_chunk(chunk_rec.oid);
+ EXCEPTION WHEN OTHERS THEN
+ RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text
+ USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail),
+ ERRCODE = sqlstate;
+ END;
+ END IF;
+ COMMIT;
+ -- SET LOCAL is only active until end of transaction.
+ -- While we could use SET at the start of the function we do not
+ -- want to bleed out search_path to caller, so we do SET LOCAL
+ -- again after COMMIT
+ SET LOCAL search_path TO pg_catalog, pg_temp;
+ IF verbose_log THEN
+ RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name;
+ END IF;
+ numchunks := numchunks + 1;
+ IF maxchunks > 0 AND numchunks >= maxchunks THEN
+ EXIT;
+ END IF;
+ END LOOP;
+END;
+$$ LANGUAGE PLPGSQL;
+
+-- fix atttypmod and attcollation for segmentby columns
+DO $$
+DECLARE
+ htc_id INTEGER;
+ htc REGCLASS;
+ _attname NAME;
+ _atttypmod INTEGER;
+ _attcollation OID;
+BEGIN
+ -- find any segmentby columns where the typmod and collation in
+ -- the compressed hypertable do not match the uncompressed
+ -- hypertable values
+ FOR htc_id, htc, _attname, _atttypmod, _attcollation IN
+ SELECT cat.htc_id, cat.htc, pga.attname, ht_mod, ht_coll
+ FROM pg_attribute pga
+ INNER JOIN
+ (
+ SELECT
+ htc.id AS htc_id,
+ format('%I.%I',htc.schema_name,htc.table_name) AS htc,
+ att_ht.atttypmod AS ht_mod,
+ att_ht.attcollation AS ht_coll,
+ c.attname
+ FROM _timescaledb_catalog.hypertable_compression c
+ INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id=c.hypertable_id
+ INNER JOIN pg_attribute att_ht ON att_ht.attname = c.attname AND att_ht.attrelid = format('%I.%I',ht.schema_name,ht.table_name)::regclass
+ INNER JOIN _timescaledb_catalog.hypertable htc ON htc.id=ht.compressed_hypertable_id
+ WHERE c.segmentby_column_index > 0
+ ) cat ON cat.htc::regclass = pga.attrelid AND cat.attname = pga.attname
+ WHERE pga.atttypmod <> ht_mod OR pga.attcollation <> ht_coll
+ LOOP
+ -- fix typmod and collation for the compressed hypertable and all compressed chunks
+ UPDATE pg_attribute SET atttypmod = _atttypmod, attcollation = _attcollation WHERE attname = _attname AND attrelid IN (
+ SELECT format('%I.%I',schema_name,table_name)::regclass from _timescaledb_catalog.chunk WHERE hypertable_id = htc_id AND NOT dropped UNION ALL SELECT htc
+ );
+ END LOOP;
+END
+$$;
diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql
index a99b15de009..e69de29bb2d 100644
--- a/sql/updates/latest-dev.sql
+++ b/sql/updates/latest-dev.sql
@@ -1,444 +0,0 @@
-CREATE TYPE _timescaledb_internal.dimension_info;
-
-CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring)
- RETURNS _timescaledb_internal.dimension_info
- LANGUAGE C STRICT IMMUTABLE
- AS '@MODULE_PATHNAME@', 'ts_dimension_info_in';
-
-CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info)
- RETURNS cstring
- LANGUAGE C STRICT IMMUTABLE
- AS '@MODULE_PATHNAME@', 'ts_dimension_info_out';
-
-CREATE TYPE _timescaledb_internal.dimension_info (
- INPUT = _timescaledb_functions.dimension_info_in,
- OUTPUT = _timescaledb_functions.dimension_info_out,
- INTERNALLENGTH = VARIABLE
-);
-
-CREATE FUNCTION @extschema@.create_hypertable(
- relation REGCLASS,
- dimension _timescaledb_internal.dimension_info,
- create_default_indexes BOOLEAN = TRUE,
- if_not_exists BOOLEAN = FALSE,
- migrate_data BOOLEAN = FALSE
-) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION @extschema@.add_dimension(
- hypertable REGCLASS,
- dimension _timescaledb_internal.dimension_info,
- if_not_exists BOOLEAN = FALSE
-) RETURNS TABLE(dimension_id INT, created BOOL)
-AS '@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION @extschema@.set_partitioning_interval(
- hypertable REGCLASS,
- partition_interval ANYELEMENT,
- dimension_name NAME = NULL
-) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION @extschema@.by_hash(column_name NAME, number_partitions INTEGER,
- partition_func regproc = NULL)
- RETURNS _timescaledb_internal.dimension_info LANGUAGE C
- AS '@MODULE_PATHNAME@', 'ts_hash_dimension';
-
-CREATE FUNCTION @extschema@.by_range(column_name NAME,
- partition_interval ANYELEMENT = NULL::bigint,
- partition_func regproc = NULL)
- RETURNS _timescaledb_internal.dimension_info LANGUAGE C
- AS '@MODULE_PATHNAME@', 'ts_range_dimension';
-
---
--- Rebuild the catalog table `_timescaledb_catalog.chunk` to
--- add new column `creation_time`
---
-CREATE TABLE _timescaledb_internal.chunk_tmp
-AS SELECT * from _timescaledb_catalog.chunk;
-
-CREATE TABLE _timescaledb_internal.tmp_chunk_seq_value AS
-SELECT last_value, is_called FROM _timescaledb_catalog.chunk_id_seq;
-
---drop foreign keys on chunk table
-ALTER TABLE _timescaledb_catalog.chunk_constraint DROP CONSTRAINT
-chunk_constraint_chunk_id_fkey;
-ALTER TABLE _timescaledb_catalog.chunk_index DROP CONSTRAINT
-chunk_index_chunk_id_fkey;
-ALTER TABLE _timescaledb_catalog.chunk_data_node DROP CONSTRAINT
-chunk_data_node_chunk_id_fkey;
-ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats DROP CONSTRAINT
-bgw_policy_chunk_stats_chunk_id_fkey;
-ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT
-compression_chunk_size_chunk_id_fkey;
-ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT
-compression_chunk_size_compressed_chunk_id_fkey;
-ALTER TABLE _timescaledb_catalog.chunk_copy_operation DROP CONSTRAINT
-chunk_copy_operation_chunk_id_fkey;
-
---drop dependent views
-DROP VIEW IF EXISTS timescaledb_information.hypertables;
-DROP VIEW IF EXISTS timescaledb_information.chunks;
-DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size;
-DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats;
-DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status;
-
-ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk;
-ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_id_seq;
-DROP TABLE _timescaledb_catalog.chunk;
-
-CREATE SEQUENCE _timescaledb_catalog.chunk_id_seq MINVALUE 1;
-
--- now create table without self referential foreign key
-CREATE TABLE _timescaledb_catalog.chunk (
- id integer NOT NULL DEFAULT nextval('_timescaledb_catalog.chunk_id_seq'),
- hypertable_id int NOT NULL,
- schema_name name NOT NULL,
- table_name name NOT NULL,
- compressed_chunk_id integer ,
- dropped boolean NOT NULL DEFAULT FALSE,
- status integer NOT NULL DEFAULT 0,
- osm_chunk boolean NOT NULL DEFAULT FALSE,
- creation_time timestamptz,
- -- table constraints
- CONSTRAINT chunk_pkey PRIMARY KEY (id),
- CONSTRAINT chunk_schema_name_table_name_key UNIQUE (schema_name, table_name)
-);
-
-INSERT INTO _timescaledb_catalog.chunk
-( id, hypertable_id, schema_name, table_name,
- compressed_chunk_id, dropped, status, osm_chunk)
-SELECT id, hypertable_id, schema_name, table_name,
- compressed_chunk_id, dropped, status, osm_chunk
-FROM _timescaledb_internal.chunk_tmp;
-
--- update creation_time for chunks
-UPDATE
- _timescaledb_catalog.chunk c
-SET
- creation_time = (pg_catalog.pg_stat_file(pg_catalog.pg_relation_filepath(r.oid))).modification
-FROM
- pg_class r, pg_namespace n
-WHERE
- r.relnamespace = n.oid
- AND r.relname = c.table_name
- AND n.nspname = c.schema_name
- AND r.relkind = 'r'
- AND c.dropped IS FALSE;
-
--- Make sure that there are no record with empty creation time
-UPDATE _timescaledb_catalog.chunk SET creation_time = now() WHERE creation_time IS NULL;
-
---add indexes to the chunk table
-CREATE INDEX chunk_hypertable_id_idx ON _timescaledb_catalog.chunk (hypertable_id);
-CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compressed_chunk_id);
-CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id);
-CREATE INDEX chunk_hypertable_id_creation_time_idx ON _timescaledb_catalog.chunk(hypertable_id, creation_time);
-
-ALTER SEQUENCE _timescaledb_catalog.chunk_id_seq OWNED BY _timescaledb_catalog.chunk.id;
-SELECT setval('_timescaledb_catalog.chunk_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_chunk_seq_value;
-
--- add self referential foreign key
-ALTER TABLE _timescaledb_catalog.chunk ADD CONSTRAINT chunk_compressed_chunk_id_fkey FOREIGN KEY ( compressed_chunk_id )
- REFERENCES _timescaledb_catalog.chunk( id );
-
---add foreign key constraint
-ALTER TABLE _timescaledb_catalog.chunk
- ADD CONSTRAINT chunk_hypertable_id_fkey
- FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id);
-
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', '');
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_id_seq', '');
-
--- Add non-null constraint
-ALTER TABLE _timescaledb_catalog.chunk
- ALTER COLUMN creation_time SET NOT NULL;
-
---add the foreign key constraints
-ALTER TABLE _timescaledb_catalog.chunk_constraint ADD CONSTRAINT
-chunk_constraint_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id);
-ALTER TABLE _timescaledb_catalog.chunk_index ADD CONSTRAINT
-chunk_index_chunk_id_fkey FOREIGN KEY (chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.chunk_data_node ADD CONSTRAINT
-chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id);
-ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats ADD CONSTRAINT
-bgw_policy_chunk_stats_chunk_id_fkey FOREIGN KEY (chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT
-compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT
-compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id)
-REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.chunk_copy_operation ADD CONSTRAINT
-chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE;
-
---cleanup
-DROP TABLE _timescaledb_internal.chunk_tmp;
-DROP TABLE _timescaledb_internal.tmp_chunk_seq_value;
-
-GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC;
-GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC;
--- end recreate _timescaledb_catalog.chunk table --
-
---
--- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to
--- add new column `numrows_frozen_immediately`
---
-CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp
- AS SELECT * from _timescaledb_catalog.compression_chunk_size;
-
--- Drop depended views
--- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update
--- (see above)
-
--- Drop table
-ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size;
-DROP TABLE _timescaledb_catalog.compression_chunk_size;
-
-CREATE TABLE _timescaledb_catalog.compression_chunk_size (
- chunk_id integer NOT NULL,
- compressed_chunk_id integer NOT NULL,
- uncompressed_heap_size bigint NOT NULL,
- uncompressed_toast_size bigint NOT NULL,
- uncompressed_index_size bigint NOT NULL,
- compressed_heap_size bigint NOT NULL,
- compressed_toast_size bigint NOT NULL,
- compressed_index_size bigint NOT NULL,
- numrows_pre_compression bigint,
- numrows_post_compression bigint,
- numrows_frozen_immediately bigint,
- -- table constraints
- CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id),
- CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
- CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
-);
-
-INSERT INTO _timescaledb_catalog.compression_chunk_size
-(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
- uncompressed_index_size, compressed_heap_size, compressed_toast_size,
- compressed_index_size, numrows_pre_compression, numrows_post_compression, numrows_frozen_immediately)
-SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
- uncompressed_index_size, compressed_heap_size, compressed_toast_size,
- compressed_index_size, numrows_pre_compression, numrows_post_compression, 0
-FROM _timescaledb_internal.compression_chunk_size_tmp;
-
-DROP TABLE _timescaledb_internal.compression_chunk_size_tmp;
-
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', '');
-
-GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC;
-
--- End modify `_timescaledb_catalog.compression_chunk_size`
-
-DROP FUNCTION @extschema@.drop_chunks(REGCLASS, "any", "any", BOOL);
-CREATE FUNCTION @extschema@.drop_chunks(
- relation REGCLASS,
- older_than "any" = NULL,
- newer_than "any" = NULL,
- verbose BOOLEAN = FALSE,
- created_before "any" = NULL,
- created_after "any" = NULL
- ) RETURNS SETOF TEXT AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks'
- LANGUAGE C VOLATILE PARALLEL UNSAFE;
-
-DROP FUNCTION @extschema@.show_chunks(REGCLASS, "any", "any");
-CREATE FUNCTION @extschema@.show_chunks(
- relation REGCLASS,
- older_than "any" = NULL,
- newer_than "any" = NULL,
- created_before "any" = NULL,
- created_after "any" = NULL
- ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks'
- LANGUAGE C STABLE PARALLEL SAFE;
-
-DROP FUNCTION @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT);
-CREATE FUNCTION @extschema@.add_retention_policy(
- relation REGCLASS,
- drop_after "any" = NULL,
- if_not_exists BOOL = false,
- schedule_interval INTERVAL = NULL,
- initial_start TIMESTAMPTZ = NULL,
- timezone TEXT = NULL,
- drop_created_before INTERVAL = NULL
-)
-RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add'
-LANGUAGE C VOLATILE;
-
-DROP FUNCTION @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT);
-CREATE FUNCTION @extschema@.add_compression_policy(
- hypertable REGCLASS,
- compress_after "any" = NULL,
- if_not_exists BOOL = false,
- schedule_interval INTERVAL = NULL,
- initial_start TIMESTAMPTZ = NULL,
- timezone TEXT = NULL,
- compress_created_before INTERVAL = NULL
-)
-RETURNS INTEGER
-AS '@MODULE_PATHNAME@', 'ts_policy_compression_add'
-LANGUAGE C VOLATILE;
-
-DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN);
-DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_execute(INTEGER, INTEGER, ANYELEMENT, INTEGER, BOOLEAN, BOOLEAN);
-CREATE PROCEDURE
-_timescaledb_functions.policy_compression_execute(
- job_id INTEGER,
- htid INTEGER,
- lag ANYELEMENT,
- maxchunks INTEGER,
- verbose_log BOOLEAN,
- recompress_enabled BOOLEAN,
- use_creation_time BOOLEAN)
-AS $$
-DECLARE
- htoid REGCLASS;
- chunk_rec RECORD;
- numchunks INTEGER := 1;
- _message text;
- _detail text;
- -- chunk status bits:
- bit_compressed int := 1;
- bit_compressed_unordered int := 2;
- bit_frozen int := 4;
- bit_compressed_partial int := 8;
- creation_lag INTERVAL := NULL;
-BEGIN
-
- -- procedures with SET clause cannot execute transaction
- -- control so we adjust search_path in procedure body
- SET LOCAL search_path TO pg_catalog, pg_temp;
-
- SELECT format('%I.%I', schema_name, table_name) INTO htoid
- FROM _timescaledb_catalog.hypertable
- WHERE id = htid;
-
- -- for the integer cases, we have to compute the lag w.r.t
- -- the integer_now function and then pass on to show_chunks
- IF pg_typeof(lag) IN ('BIGINT'::regtype, 'INTEGER'::regtype, 'SMALLINT'::regtype) THEN
- -- cannot have use_creation_time set with this
- IF use_creation_time IS TRUE THEN
- RAISE EXCEPTION 'job % cannot use creation time with integer_now function', job_id;
- END IF;
- lag := _timescaledb_functions.subtract_integer_from_now(htoid, lag::BIGINT);
- END IF;
-
- -- if use_creation_time has been specified then the lag needs to be used with the
- -- "compress_created_before" argument. Otherwise the usual "older_than" argument
- -- is good enough
- IF use_creation_time IS TRUE THEN
- creation_lag := lag;
- lag := NULL;
- END IF;
-
- FOR chunk_rec IN
- SELECT
- show.oid, ch.schema_name, ch.table_name, ch.status
- FROM
- @extschema@.show_chunks(htoid, older_than => lag, created_before => creation_lag) AS show(oid)
- INNER JOIN pg_class pgc ON pgc.oid = show.oid
- INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid
- INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid
- WHERE
- ch.dropped IS FALSE
- AND (
- ch.status = 0 OR
- (
- ch.status & bit_compressed > 0 AND (
- ch.status & bit_compressed_unordered > 0 OR
- ch.status & bit_compressed_partial > 0
- )
- )
- )
- LOOP
- IF chunk_rec.status = 0 THEN
- BEGIN
- PERFORM @extschema@.compress_chunk( chunk_rec.oid );
- EXCEPTION WHEN OTHERS THEN
- GET STACKED DIAGNOSTICS
- _message = MESSAGE_TEXT,
- _detail = PG_EXCEPTION_DETAIL;
- RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text
- USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail),
- ERRCODE = sqlstate;
- END;
- ELSIF
- (
- chunk_rec.status & bit_compressed > 0 AND (
- chunk_rec.status & bit_compressed_unordered > 0 OR
- chunk_rec.status & bit_compressed_partial > 0
- )
- ) AND recompress_enabled IS TRUE THEN
- BEGIN
- PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true);
- EXCEPTION WHEN OTHERS THEN
- RAISE WARNING 'decompressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text
- USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail),
- ERRCODE = sqlstate;
- END;
- -- SET LOCAL is only active until end of transaction.
- -- While we could use SET at the start of the function we do not
- -- want to bleed out search_path to caller, so we do SET LOCAL
- -- again after COMMIT
- BEGIN
- PERFORM @extschema@.compress_chunk(chunk_rec.oid);
- EXCEPTION WHEN OTHERS THEN
- RAISE WARNING 'compressing chunk "%" failed when compression policy is executed', chunk_rec.oid::regclass::text
- USING DETAIL = format('Message: (%s), Detail: (%s).', _message, _detail),
- ERRCODE = sqlstate;
- END;
- END IF;
- COMMIT;
- -- SET LOCAL is only active until end of transaction.
- -- While we could use SET at the start of the function we do not
- -- want to bleed out search_path to caller, so we do SET LOCAL
- -- again after COMMIT
- SET LOCAL search_path TO pg_catalog, pg_temp;
- IF verbose_log THEN
- RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name;
- END IF;
- numchunks := numchunks + 1;
- IF maxchunks > 0 AND numchunks >= maxchunks THEN
- EXIT;
- END IF;
- END LOOP;
-END;
-$$ LANGUAGE PLPGSQL;
-
--- fix atttypmod and attcollation for segmentby columns
-DO $$
-DECLARE
- htc_id INTEGER;
- htc REGCLASS;
- _attname NAME;
- _atttypmod INTEGER;
- _attcollation OID;
-BEGIN
- -- find any segmentby columns where typmod and collation in
- -- the compressed hypertable does not match the uncompressed
- -- hypertable values
- FOR htc_id, htc, _attname, _atttypmod, _attcollation IN
- SELECT cat.htc_id, cat.htc, pga.attname, ht_mod, ht_coll
- FROM pg_attribute pga
- INNER JOIN
- (
- SELECT
- htc.id AS htc_id,
- format('%I.%I',htc.schema_name,htc.table_name) AS htc,
- att_ht.atttypmod AS ht_mod,
- att_ht.attcollation AS ht_coll,
- c.attname
- FROM _timescaledb_catalog.hypertable_compression c
- INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id=c.hypertable_id
- INNER JOIN pg_attribute att_ht ON att_ht.attname = c.attname AND att_ht.attrelid = format('%I.%I',ht.schema_name,ht.table_name)::regclass
- INNER JOIN _timescaledb_catalog.hypertable htc ON htc.id=ht.compressed_hypertable_id
- WHERE c.segmentby_column_index > 0
- ) cat ON cat.htc::regclass = pga.attrelid AND cat.attname = pga.attname
- WHERE pga.atttypmod <> ht_mod OR pga.attcollation <> ht_coll
- LOOP
- -- fix typmod and collation for the compressed hypertable and all compressed chunks
- UPDATE pg_attribute SET atttypmod = _atttypmod, attcollation = _attcollation WHERE attname = _attname AND attrelid IN (
- SELECT format('%I.%I',schema_name,table_name)::regclass from _timescaledb_catalog.chunk WHERE hypertable_id = htc_id AND NOT dropped UNION ALL SELECT htc
- );
- END LOOP;
-END
-$$;
diff --git a/version.config b/version.config
index bfaac2bac53..240850e13d0 100644
--- a/version.config
+++ b/version.config
@@ -1,3 +1,3 @@
-version = 2.13.0-dev
-update_from_version = 2.12.2
+version = 2.14.0-dev
+update_from_version = 2.13.0
downgrade_to_version = 2.12.2