From d7c8e3c15310ca9d0de372145340f8d15a14947d Mon Sep 17 00:00:00 2001
From: Mats Kindahl <mats@timescale.com>
Date: Wed, 27 Nov 2024 08:23:02 +0100
Subject: [PATCH 1/8] Remove backward direction for ABI tests

Extension builds cannot be expected to always work in the backward
direction: a build against a later PostgreSQL version (e.g., 17.3) may
use functions introduced after an earlier version (e.g., 17.0), so we
cannot reliably test such builds against the earlier servers.
---
 .github/workflows/abi.yaml | 37 ++++++-------------------------------
 1 file changed, 6 insertions(+), 31 deletions(-)

diff --git a/.github/workflows/abi.yaml b/.github/workflows/abi.yaml
index d3b34607193..b4240ab1afb 100644
--- a/.github/workflows/abi.yaml
+++ b/.github/workflows/abi.yaml
@@ -44,43 +44,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        dir: [ "forward", "backward" ]
         pg: [ 14, 15, 16, 17 ]
         include:
-          - dir: backward
-            pg: 14
-            builder: ${{ fromJson(needs.config.outputs.pg14_latest) }}-alpine3.19
-            tester: ${{ fromJson(needs.config.outputs.pg14_abi_min) }}-alpine
-            ignores: memoize
-          - dir: forward
-            pg: 14
+          - pg: 14
             builder: ${{ fromJson(needs.config.outputs.pg14_abi_min) }}-alpine
             tester: ${{ fromJson(needs.config.outputs.pg14_latest) }}-alpine3.19
-          - dir: backward
-            pg: 15
-            builder: ${{ fromJson(needs.config.outputs.pg15_latest) }}-alpine3.19
-            tester: ${{ fromJson(needs.config.outputs.pg15_abi_min) }}-alpine
-          - dir: forward
-            pg: 15
+          - pg: 15
             builder: ${{ fromJson(needs.config.outputs.pg15_abi_min) }}-alpine
             tester: ${{ fromJson(needs.config.outputs.pg15_latest) }}-alpine3.19
-          - dir: backward
-            pg: 16
-            builder: ${{ fromJson(needs.config.outputs.pg16_latest) }}-alpine3.19
-            tester: ${{ fromJson(needs.config.outputs.pg16_abi_min) }}-alpine
-            # this test has issues with 16.0 version of pg_dump binary
-            # which affects backwards test only 
-            ignores: pg_dump_unprivileged
-          - dir: forward
-            pg: 16
+          - pg: 16
             builder: ${{ fromJson(needs.config.outputs.pg16_abi_min) }}-alpine
             tester: ${{ fromJson(needs.config.outputs.pg16_latest) }}-alpine3.19
-          - dir: backward
-            pg: 17
-            builder: ${{ fromJson(needs.config.outputs.pg17_latest) }}-alpine3.19
-            tester: ${{ fromJson(needs.config.outputs.pg17_abi_min) }}-alpine
-          - dir: forward
-            pg: 17
+          - pg: 17
             builder: ${{ fromJson(needs.config.outputs.pg17_abi_min) }}-alpine
             tester: ${{ fromJson(needs.config.outputs.pg17_latest) }}-alpine3.19
 
@@ -89,7 +64,7 @@ jobs:
     - name: Checkout TimescaleDB
       uses: actions/checkout@v4
 
-    - name: Build extension
+    - name: Build extension with ${{ matrix.builder }}
       run: |
         BUILDER_IMAGE="postgres:${{matrix.builder}}"
 
@@ -113,7 +88,7 @@ jobs:
           cp `pg_config --pkglibdir`/timescaledb*.so build_abi/install_lib
         EOF
 
-    - name: Run tests
+    - name: Run tests on server ${{ matrix.tester }}
       run: |
         TEST_IMAGE="postgres:${{ matrix.tester }}"
 

From 7ba7e30e268b986b5341edafcb74ac201a8531c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?=
 <fabriziomello@gmail.com>
Date: Fri, 29 Nov 2024 17:27:32 -0300
Subject: [PATCH 2/8] Warning for the old CAggs format in all PG versions
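
The updated warning points users at the `cagg_migrate` procedure. As a
hedged sketch (the aggregate name `conditions_summary` is
hypothetical), migrating an old-format continuous aggregate looks like:

    CALL cagg_migrate('conditions_summary');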

---
 sql/updates/post-update.sql | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/sql/updates/post-update.sql b/sql/updates/post-update.sql
index 0dd2d8a8fc7..b682305ea39 100644
--- a/sql/updates/post-update.sql
+++ b/sql/updates/post-update.sql
@@ -144,10 +144,9 @@ BEGIN
         format('%I.%I', user_view_schema, user_view_name)
       FROM _timescaledb_catalog.continuous_agg
       WHERE finalized IS FALSE
-      AND current_setting('server_version_num')::int >= 150000
       ORDER BY 1
     LOOP
-      RAISE WARNING 'Continuous Aggregate: % with old format will not be supported on PostgreSQL version greater or equal to 15. You should upgrade to the new format', cagg_name;
+      RAISE WARNING 'Continuous Aggregate "%" with old format will not be supported in the next version. You should use `cagg_migrate` procedure to migrate to the new format.', cagg_name;
     END LOOP;
 END $$;
 

From 155ca6f7ef2925735c7063cd9178edd185c17009 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?=
 <fabriziomello@gmail.com>
Date: Fri, 29 Nov 2024 16:09:25 -0300
Subject: [PATCH 3/8] Fix CAgg permissions regression leftovers

We neglected to re-enable ON_ERROR_STOP after some statements that are
expected to fail.
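
For context, the psql pattern these tests rely on (statements taken
from the test file): an expected failure is only tolerated while
ON_ERROR_STOP is 0, so each such block must be closed by resetting it
to 1:

    \set ON_ERROR_STOP 0
    -- expected to fail: psql prints the ERROR and keeps going
    CREATE INDEX cagg_idx ON mat_refresh_test(humidity);
    \set ON_ERROR_STOP 1
    -- any unexpected error from here on aborts the regression run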
---
 tsl/test/expected/cagg_permissions-14.out | 4 ++++
 tsl/test/expected/cagg_permissions-15.out | 4 ++++
 tsl/test/expected/cagg_permissions-16.out | 4 ++++
 tsl/test/expected/cagg_permissions-17.out | 4 ++++
 tsl/test/sql/cagg_permissions.sql.in      | 4 ++++
 5 files changed, 20 insertions(+)

diff --git a/tsl/test/expected/cagg_permissions-14.out b/tsl/test/expected/cagg_permissions-14.out
index 3b2f6f4bc1b..04c399acbba 100644
--- a/tsl/test/expected/cagg_permissions-14.out
+++ b/tsl/test/expected/cagg_permissions-14.out
@@ -64,6 +64,7 @@ CREATE USER not_priv;
 \set ON_ERROR_STOP 0
 CREATE INDEX cagg_idx on mat_refresh_test(humidity);
 ERROR:  must be owner of hypertable "_materialized_hypertable_2"
+\set ON_ERROR_STOP 1
 \c :TEST_DBNAME :ROLE_SUPERUSER
 DROP USER not_priv;
 \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
@@ -193,6 +194,7 @@ group by time_bucket(100, timec), location WITH NO DATA;
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for function get_constant
 DROP MATERIALIZED VIEW mat_perm_view_test;
+\set ON_ERROR_STOP 1
 --can create a mat view on something with select and trigger grants
 CREATE MATERIALIZED VIEW mat_perm_view_test
 WITH ( timescaledb.continuous, timescaledb.materialized_only=true)
@@ -213,9 +215,11 @@ REVOKE SELECT ON conditions_for_perm_check_w_grant FROM public;
 insert into conditions_for_perm_check_w_grant
 select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL;
 \c  :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
+\set ON_ERROR_STOP 0
 --refresh mat view should now fail due to lack of permissions
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for table conditions_for_perm_check_w_grant
+\set ON_ERROR_STOP 1
 --but the old data will still be there
 SELECT * FROM mat_perm_view_test;
  location | max 
diff --git a/tsl/test/expected/cagg_permissions-15.out b/tsl/test/expected/cagg_permissions-15.out
index 3b2f6f4bc1b..04c399acbba 100644
--- a/tsl/test/expected/cagg_permissions-15.out
+++ b/tsl/test/expected/cagg_permissions-15.out
@@ -64,6 +64,7 @@ CREATE USER not_priv;
 \set ON_ERROR_STOP 0
 CREATE INDEX cagg_idx on mat_refresh_test(humidity);
 ERROR:  must be owner of hypertable "_materialized_hypertable_2"
+\set ON_ERROR_STOP 1
 \c :TEST_DBNAME :ROLE_SUPERUSER
 DROP USER not_priv;
 \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
@@ -193,6 +194,7 @@ group by time_bucket(100, timec), location WITH NO DATA;
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for function get_constant
 DROP MATERIALIZED VIEW mat_perm_view_test;
+\set ON_ERROR_STOP 1
 --can create a mat view on something with select and trigger grants
 CREATE MATERIALIZED VIEW mat_perm_view_test
 WITH ( timescaledb.continuous, timescaledb.materialized_only=true)
@@ -213,9 +215,11 @@ REVOKE SELECT ON conditions_for_perm_check_w_grant FROM public;
 insert into conditions_for_perm_check_w_grant
 select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL;
 \c  :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
+\set ON_ERROR_STOP 0
 --refresh mat view should now fail due to lack of permissions
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for table conditions_for_perm_check_w_grant
+\set ON_ERROR_STOP 1
 --but the old data will still be there
 SELECT * FROM mat_perm_view_test;
  location | max 
diff --git a/tsl/test/expected/cagg_permissions-16.out b/tsl/test/expected/cagg_permissions-16.out
index 3b2f6f4bc1b..04c399acbba 100644
--- a/tsl/test/expected/cagg_permissions-16.out
+++ b/tsl/test/expected/cagg_permissions-16.out
@@ -64,6 +64,7 @@ CREATE USER not_priv;
 \set ON_ERROR_STOP 0
 CREATE INDEX cagg_idx on mat_refresh_test(humidity);
 ERROR:  must be owner of hypertable "_materialized_hypertable_2"
+\set ON_ERROR_STOP 1
 \c :TEST_DBNAME :ROLE_SUPERUSER
 DROP USER not_priv;
 \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
@@ -193,6 +194,7 @@ group by time_bucket(100, timec), location WITH NO DATA;
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for function get_constant
 DROP MATERIALIZED VIEW mat_perm_view_test;
+\set ON_ERROR_STOP 1
 --can create a mat view on something with select and trigger grants
 CREATE MATERIALIZED VIEW mat_perm_view_test
 WITH ( timescaledb.continuous, timescaledb.materialized_only=true)
@@ -213,9 +215,11 @@ REVOKE SELECT ON conditions_for_perm_check_w_grant FROM public;
 insert into conditions_for_perm_check_w_grant
 select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL;
 \c  :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
+\set ON_ERROR_STOP 0
 --refresh mat view should now fail due to lack of permissions
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for table conditions_for_perm_check_w_grant
+\set ON_ERROR_STOP 1
 --but the old data will still be there
 SELECT * FROM mat_perm_view_test;
  location | max 
diff --git a/tsl/test/expected/cagg_permissions-17.out b/tsl/test/expected/cagg_permissions-17.out
index 82b74755ed9..ec4f177f699 100644
--- a/tsl/test/expected/cagg_permissions-17.out
+++ b/tsl/test/expected/cagg_permissions-17.out
@@ -64,6 +64,7 @@ CREATE USER not_priv;
 \set ON_ERROR_STOP 0
 CREATE INDEX cagg_idx on mat_refresh_test(humidity);
 ERROR:  must be owner of hypertable "_materialized_hypertable_2"
+\set ON_ERROR_STOP 1
 \c :TEST_DBNAME :ROLE_SUPERUSER
 DROP USER not_priv;
 \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
@@ -193,6 +194,7 @@ group by time_bucket(100, timec), location WITH NO DATA;
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for function get_constant
 DROP MATERIALIZED VIEW mat_perm_view_test;
+\set ON_ERROR_STOP 1
 --can create a mat view on something with select and trigger grants
 CREATE MATERIALIZED VIEW mat_perm_view_test
 WITH ( timescaledb.continuous, timescaledb.materialized_only=true)
@@ -213,9 +215,11 @@ REVOKE SELECT ON conditions_for_perm_check_w_grant FROM public;
 insert into conditions_for_perm_check_w_grant
 select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL;
 \c  :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
+\set ON_ERROR_STOP 0
 --refresh mat view should now fail due to lack of permissions
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 ERROR:  permission denied for table conditions_for_perm_check_w_grant
+\set ON_ERROR_STOP 1
 --but the old data will still be there
 SELECT * FROM mat_perm_view_test;
  location | max 
diff --git a/tsl/test/sql/cagg_permissions.sql.in b/tsl/test/sql/cagg_permissions.sql.in
index f0a3d140ebe..dd1d3c54a72 100644
--- a/tsl/test/sql/cagg_permissions.sql.in
+++ b/tsl/test/sql/cagg_permissions.sql.in
@@ -62,6 +62,7 @@ CREATE USER not_priv;
 -- A user with no ownership on the Cagg cannot create index on it. -- This should fail
 \set ON_ERROR_STOP 0
 CREATE INDEX cagg_idx on mat_refresh_test(humidity);
+\set ON_ERROR_STOP 1
 \c :TEST_DBNAME :ROLE_SUPERUSER
 DROP USER not_priv;
 \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
@@ -182,6 +183,7 @@ group by time_bucket(100, timec), location WITH NO DATA;
 --this should fail
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
 DROP MATERIALIZED VIEW mat_perm_view_test;
+\set ON_ERROR_STOP 1
 
 --can create a mat view on something with select and trigger grants
 CREATE MATERIALIZED VIEW mat_perm_view_test
@@ -202,8 +204,10 @@ insert into conditions_for_perm_check_w_grant
 select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL;
 
 \c  :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
+\set ON_ERROR_STOP 0
 --refresh mat view should now fail due to lack of permissions
 CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL);
+\set ON_ERROR_STOP 1
 
 --but the old data will still be there
 SELECT * FROM mat_perm_view_test;

From 2a0e65dcf9565739645b1528ec2e652b80d5417e Mon Sep 17 00:00:00 2001
From: Mats Kindahl <mats@timescale.com>
Date: Thu, 21 Nov 2024 11:55:29 +0100
Subject: [PATCH 4/8] Add aliases for compression functions and views

This adds a set of aliases for existing compression functions and views
to better reflect the columnstore capabilities of compressed
hypertables.

The procedures `convert_to_rowstore` and `convert_to_columnstore` are
added as aliases for `decompress_chunk` and `compress_chunk`
respectively. We change these from functions to procedures so that the
conversion can be split into multiple transactions, avoiding heavy
locks held for long periods.

The procedures `add_columnstore_policy` and `remove_columnstore_policy`
are added as aliases for `add_compression_policy` and
`remove_compression_policy` respectively.

The functions `hypertable_columnstore_stats` and
`chunk_columnstore_stats` are added as aliases for
`hypertable_compression_stats` and `chunk_compression_stats`
respectively.

The views `hypertable_columnstore_settings`,
`chunk_columnstore_settings`, and `columnstore_settings` are added as
aliases for the corresponding views.

We also add aliases for the parameters of the functions and procedures
that take them; a usage sketch follows the list.

- The parameter `timescaledb.enable_columnstore` is an alias for
  `timescaledb.compress`

- The parameter `timescaledb.segmentby` is an alias for
  `timescaledb.compress_segmentby`.

- The parameter `timescaledb.orderby` is an alias for
  `timescaledb.compress_orderby`.
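
As a usage sketch of the new names (the hypertable `metrics`, its
columns, and the chunk name are hypothetical; everything else is added
by this patch):

    -- Enable the columnstore using the parameter aliases
    ALTER TABLE metrics SET (
        timescaledb.enable_columnstore,
        timescaledb.segmentby = 'device_id',
        timescaledb.orderby = 'ts DESC'
    );
    -- Convert a chunk and manage the policy using the procedure aliases
    CALL convert_to_columnstore('_timescaledb_internal._hyper_1_1_chunk');
    CALL add_columnstore_policy('metrics', interval '7 days');
    -- Inspect settings and statistics through the view/function aliases
    SELECT * FROM timescaledb_information.hypertable_columnstore_settings;
    SELECT * FROM hypertable_columnstore_stats('metrics');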
---
 .unreleased/pr_7443                       |  1 +
 sql/maintenance_utils.sql                 | 13 ++++
 sql/policy_api.sql                        | 16 +++++
 sql/size_utils.sql                        | 37 ++++++++++
 sql/updates/latest-dev.sql                | 67 +++++++++++++++++
 sql/updates/reverse-dev.sql               | 13 ++++
 sql/views.sql                             |  7 ++
 src/compression_with_clause.c             |  8 +--
 src/process_utility.c                     |  6 +-
 src/ts_catalog/continuous_agg.c           | 20 +++---
 src/with_clause_parser.c                  | 29 ++++----
 src/with_clause_parser.h                  |  4 +-
 test/expected/pg_dump.out                 |  4 +-
 test/src/test_with_clause_parser.c        | 14 ++--
 tsl/src/continuous_aggs/options.c         |  2 +-
 tsl/test/expected/columnstore_aliases.out | 87 +++++++++++++++++++++++
 tsl/test/shared/expected/extension.out    |  6 ++
 tsl/test/sql/CMakeLists.txt               | 13 ++--
 tsl/test/sql/columnstore_aliases.sql      | 69 ++++++++++++++++++
 19 files changed, 370 insertions(+), 46 deletions(-)
 create mode 100644 .unreleased/pr_7443
 create mode 100644 tsl/test/expected/columnstore_aliases.out
 create mode 100644 tsl/test/sql/columnstore_aliases.sql

diff --git a/.unreleased/pr_7443 b/.unreleased/pr_7443
new file mode 100644
index 00000000000..7196037db05
--- /dev/null
+++ b/.unreleased/pr_7443
@@ -0,0 +1 @@
+Implements: #7443 Add Hypercore function and view aliases
diff --git a/sql/maintenance_utils.sql b/sql/maintenance_utils.sql
index 430f62dd81c..339ea07beeb 100644
--- a/sql/maintenance_utils.sql
+++ b/sql/maintenance_utils.sql
@@ -39,11 +39,24 @@ CREATE OR REPLACE FUNCTION @extschema@.compress_chunk(
     hypercore_use_access_method BOOL = NULL
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C VOLATILE;
 
+-- Alias for compress_chunk above.
+CREATE OR REPLACE PROCEDURE @extschema@.convert_to_columnstore(
+    chunk REGCLASS,
+    if_not_columnstore BOOLEAN = true,
+    recompress BOOLEAN = false,
+    hypercore_use_access_method BOOL = NULL
+) AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C;
+
 CREATE OR REPLACE FUNCTION @extschema@.decompress_chunk(
     uncompressed_chunk REGCLASS,
     if_compressed BOOLEAN = true
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_decompress_chunk' LANGUAGE C STRICT VOLATILE;
 
+CREATE OR REPLACE PROCEDURE @extschema@.convert_to_rowstore(
+    chunk REGCLASS,
+    if_columnstore BOOLEAN = true
+) AS '@MODULE_PATHNAME@', 'ts_decompress_chunk' LANGUAGE C;
+
 CREATE OR REPLACE FUNCTION _timescaledb_functions.recompress_chunk_segmentwise(
     uncompressed_chunk REGCLASS,
     if_compressed BOOLEAN = true
diff --git a/sql/policy_api.sql b/sql/policy_api.sql
index ac352e8943c..83a8a39049f 100644
--- a/sql/policy_api.sql
+++ b/sql/policy_api.sql
@@ -59,10 +59,26 @@ RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_policy_compression_add'
 LANGUAGE C VOLATILE; -- not strict because we need to set different default values for schedule_interval
 
+CREATE OR REPLACE PROCEDURE @extschema@.add_columnstore_policy(
+    hypertable REGCLASS,
+    after "any" = NULL,
+    if_not_exists BOOL = false,
+    schedule_interval INTERVAL = NULL,
+    initial_start TIMESTAMPTZ = NULL,
+    timezone TEXT = NULL,
+    created_before INTERVAL = NULL,
+    hypercore_use_access_method BOOL = NULL
+) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_policy_compression_add';
+
 CREATE OR REPLACE FUNCTION @extschema@.remove_compression_policy(hypertable REGCLASS, if_exists BOOL = false) RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_policy_compression_remove'
 LANGUAGE C VOLATILE STRICT;
 
+CREATE OR REPLACE PROCEDURE @extschema@.remove_columnstore_policy(
+       hypertable REGCLASS,
+       if_exists BOOL = false
+) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_policy_compression_remove';
+
 /* continuous aggregates policy */
 CREATE OR REPLACE FUNCTION @extschema@.add_continuous_aggregate_policy(
     continuous_aggregate REGCLASS, start_offset "any",
diff --git a/sql/size_utils.sql b/sql/size_utils.sql
index 27bd7da9438..8dbfb49957c 100644
--- a/sql/size_utils.sql
+++ b/sql/size_utils.sql
@@ -544,6 +544,25 @@ BEGIN
 END;
 $BODY$ SET search_path TO pg_catalog, pg_temp;
 
+CREATE OR REPLACE FUNCTION @extschema@.chunk_columnstore_stats (hypertable REGCLASS)
+    RETURNS TABLE (
+        chunk_schema name,
+        chunk_name name,
+        compression_status text,
+        before_compression_table_bytes bigint,
+        before_compression_index_bytes bigint,
+        before_compression_toast_bytes bigint,
+        before_compression_total_bytes bigint,
+        after_compression_table_bytes bigint,
+        after_compression_index_bytes bigint,
+        after_compression_toast_bytes bigint,
+        after_compression_total_bytes bigint,
+        node_name name)
+    LANGUAGE SQL
+    STABLE STRICT
+    AS 'SELECT * FROM @extschema@.chunk_compression_stats($1)'
+    SET search_path TO pg_catalog, pg_temp;
+
 -- Get compression statistics for a hypertable that has
 -- compression enabled
 CREATE OR REPLACE FUNCTION @extschema@.hypertable_compression_stats (hypertable REGCLASS)
@@ -581,6 +600,24 @@ $BODY$
         ch.node_name;
 $BODY$ SET search_path TO pg_catalog, pg_temp;
 
+CREATE OR REPLACE FUNCTION @extschema@.hypertable_columnstore_stats (hypertable REGCLASS)
+    RETURNS TABLE (
+        total_chunks bigint,
+        number_compressed_chunks bigint,
+        before_compression_table_bytes bigint,
+        before_compression_index_bytes bigint,
+        before_compression_toast_bytes bigint,
+        before_compression_total_bytes bigint,
+        after_compression_table_bytes bigint,
+        after_compression_index_bytes bigint,
+        after_compression_toast_bytes bigint,
+        after_compression_total_bytes bigint,
+        node_name name)
+    LANGUAGE SQL
+    STABLE STRICT
+    AS 'SELECT * FROM @extschema@.hypertable_compression_stats($1)'
+    SET search_path TO pg_catalog, pg_temp;
+
 -------------Get index size for hypertables -------
 --schema_name      - schema_name for hypertable index
 -- index_name      - index on hyper table
diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql
index 66ad63058fb..d233b0ae5f8 100644
--- a/sql/updates/latest-dev.sql
+++ b/sql/updates/latest-dev.sql
@@ -47,3 +47,70 @@ LANGUAGE C VOLATILE;
 DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(job_id INTEGER, htid INTEGER, lag ANYELEMENT, maxchunks INTEGER, verbose_log BOOLEAN, recompress_enabled  BOOLEAN, use_creation_time BOOLEAN);
 
 DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression(job_id INTEGER, config JSONB);
+
+CREATE PROCEDURE @extschema@.convert_to_columnstore(
+    chunk REGCLASS,
+    if_not_columnstore BOOLEAN = true,
+    recompress BOOLEAN = false,
+    hypercore_use_access_method BOOL = NULL)
+AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
+LANGUAGE C;
+
+CREATE PROCEDURE @extschema@.convert_to_rowstore(
+    chunk REGCLASS,
+    if_columnstore BOOLEAN = true)
+AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
+LANGUAGE C;
+
+CREATE PROCEDURE @extschema@.add_columnstore_policy(
+    hypertable REGCLASS,
+    after "any" = NULL,
+    if_not_exists BOOL = false,
+    schedule_interval INTERVAL = NULL,
+    initial_start TIMESTAMPTZ = NULL,
+    timezone TEXT = NULL,
+    created_before INTERVAL = NULL,
+    hypercore_use_access_method BOOL = NULL
+) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_update_placeholder';
+
+CREATE PROCEDURE @extschema@.remove_columnstore_policy(
+       hypertable REGCLASS,
+       if_exists BOOL = false
+) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_update_placeholder';
+
+CREATE FUNCTION @extschema@.chunk_columnstore_stats (hypertable REGCLASS)
+    RETURNS TABLE (
+        chunk_schema name,
+        chunk_name name,
+        compression_status text,
+        before_compression_table_bytes bigint,
+        before_compression_index_bytes bigint,
+        before_compression_toast_bytes bigint,
+        before_compression_total_bytes bigint,
+        after_compression_table_bytes bigint,
+        after_compression_index_bytes bigint,
+        after_compression_toast_bytes bigint,
+        after_compression_total_bytes bigint,
+        node_name name)
+    LANGUAGE SQL
+    STABLE STRICT
+    AS 'SELECT * FROM @extschema@.chunk_compression_stats($1)'
+    SET search_path TO pg_catalog, pg_temp;
+
+CREATE FUNCTION @extschema@.hypertable_columnstore_stats (hypertable REGCLASS)
+    RETURNS TABLE (
+        total_chunks bigint,
+        number_compressed_chunks bigint,
+        before_compression_table_bytes bigint,
+        before_compression_index_bytes bigint,
+        before_compression_toast_bytes bigint,
+        before_compression_total_bytes bigint,
+        after_compression_table_bytes bigint,
+        after_compression_index_bytes bigint,
+        after_compression_toast_bytes bigint,
+        after_compression_total_bytes bigint,
+        node_name name)
+    LANGUAGE SQL
+    STABLE STRICT
+    AS 'SELECT * FROM @extschema@.hypertable_compression_stats($1)'
+    SET search_path TO pg_catalog, pg_temp;
diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql
index e6817621d57..6b75bc1c851 100644
--- a/sql/updates/reverse-dev.sql
+++ b/sql/updates/reverse-dev.sql
@@ -44,3 +44,16 @@ LANGUAGE C VOLATILE;
 DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(job_id INTEGER, htid INTEGER, lag ANYELEMENT, maxchunks INTEGER, verbose_log BOOLEAN, recompress_enabled  BOOLEAN, use_creation_time BOOLEAN, useam BOOLEAN);
 
 DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression(job_id INTEGER, config JSONB);
+DROP PROCEDURE IF EXISTS @extschema@.convert_to_columnstore(REGCLASS, BOOLEAN, BOOLEAN, BOOLEAN);
+DROP PROCEDURE IF EXISTS @extschema@.convert_to_rowstore(REGCLASS, BOOLEAN);
+DROP PROCEDURE IF EXISTS @extschema@.add_columnstore_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT, INTERVAL, BOOL);
+DROP PROCEDURE IF EXISTS @extschema@.remove_columnstore_policy(REGCLASS, BOOL);
+DROP FUNCTION IF EXISTS @extschema@.hypertable_columnstore_stats(REGCLASS);
+DROP FUNCTION IF EXISTS @extschema@.chunk_columnstore_stats(REGCLASS);
+
+ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.hypertable_columnstore_settings;
+ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.chunk_columnstore_settings;
+
+DROP VIEW timescaledb_information.hypertable_columnstore_settings;
+DROP VIEW timescaledb_information.chunk_columnstore_settings;
+
diff --git a/sql/views.sql b/sql/views.sql
index d771e624ead..b483e35c2ae 100644
--- a/sql/views.sql
+++ b/sql/views.sql
@@ -403,5 +403,12 @@ CREATE OR REPLACE VIEW timescaledb_information.chunk_compression_settings AS
 		FROM unnest(s.orderby, s.orderby_desc, s.orderby_nullsfirst) un(orderby, "desc", nullsfirst)
 	) un ON true;
 
+
+CREATE OR REPLACE VIEW timescaledb_information.hypertable_columnstore_settings
+AS SELECT * FROM timescaledb_information.hypertable_compression_settings;
+
+CREATE OR REPLACE VIEW timescaledb_information.chunk_columnstore_settings AS
+SELECT * FROM timescaledb_information.chunk_compression_settings;
+
 GRANT SELECT ON ALL TABLES IN SCHEMA timescaledb_information TO PUBLIC;
 
diff --git a/src/compression_with_clause.c b/src/compression_with_clause.c
index 4e92a5a1d1f..37ae1e97fed 100644
--- a/src/compression_with_clause.c
+++ b/src/compression_with_clause.c
@@ -25,20 +25,20 @@
 
 static const WithClauseDefinition compress_hypertable_with_clause_def[] = {
 		[CompressEnabled] = {
-			.arg_name = "compress",
+			.arg_names = {"compress", "enable_columnstore", NULL},
 			.type_id = BOOLOID,
 			.default_val = (Datum)false,
 		},
 		[CompressSegmentBy] = {
-			 .arg_name = "compress_segmentby",
+			.arg_names = {"compress_segmentby", "segmentby", NULL},
 			 .type_id = TEXTOID,
 		},
 		[CompressOrderBy] = {
-			 .arg_name = "compress_orderby",
+			.arg_names = {"compress_orderby", "orderby", NULL},
 			 .type_id = TEXTOID,
 		},
 		[CompressChunkTimeInterval] = {
-			 .arg_name = "compress_chunk_time_interval",
+			.arg_names = {"compress_chunk_time_interval", NULL},
 			 .type_id = INTERVALOID,
 		},
 };
diff --git a/src/process_utility.c b/src/process_utility.c
index 039a134ee90..54701b5e2c6 100644
--- a/src/process_utility.c
+++ b/src/process_utility.c
@@ -2960,10 +2960,10 @@ typedef enum HypertableIndexFlags
 } HypertableIndexFlags;
 
 static const WithClauseDefinition index_with_clauses[] = {
-	[HypertableIndexFlagMultiTransaction] = {.arg_name = "transaction_per_chunk", .type_id = BOOLOID,},
+	[HypertableIndexFlagMultiTransaction] = {.arg_names = {"transaction_per_chunk", NULL}, .type_id = BOOLOID,},
 #ifdef DEBUG
-	[HypertableIndexFlagBarrierTable] = {.arg_name = "barrier_table", .type_id = REGCLASSOID,},
-	[HypertableIndexFlagMaxChunks] = {.arg_name = "max_chunks", .type_id = INT4OID, .default_val = (Datum)-1},
+	[HypertableIndexFlagBarrierTable] = {.arg_names = {"barrier_table", NULL}, .type_id = REGCLASSOID,},
+	[HypertableIndexFlagMaxChunks] = {.arg_names = {"max_chunks", NULL}, .type_id = INT4OID, .default_val = (Datum)-1},
 #endif
 };
 
diff --git a/src/ts_catalog/continuous_agg.c b/src/ts_catalog/continuous_agg.c
index f9612dc0070..a786b4bf44a 100644
--- a/src/ts_catalog/continuous_agg.c
+++ b/src/ts_catalog/continuous_agg.c
@@ -48,39 +48,39 @@
 
 static const WithClauseDefinition continuous_aggregate_with_clause_def[] = {
 		[ContinuousEnabled] = {
-			.arg_name = "continuous",
+			.arg_names = {"continuous", NULL},
 			.type_id = BOOLOID,
 			.default_val = (Datum)false,
 		},
 		[ContinuousViewOptionCreateGroupIndex] = {
-			.arg_name = "create_group_indexes",
+			.arg_names = {"create_group_indexes", NULL},
 			.type_id = BOOLOID,
 			.default_val = (Datum)true,
 		},
 		[ContinuousViewOptionMaterializedOnly] = {
-			.arg_name = "materialized_only",
+			.arg_names = {"materialized_only", NULL},
 			.type_id = BOOLOID,
 			.default_val = (Datum)true,
 		},
 		[ContinuousViewOptionCompress] = {
-			.arg_name = "compress",
+			.arg_names = {"enable_columnstore", "compress", NULL},
 			.type_id = BOOLOID,
 		},
 		[ContinuousViewOptionFinalized] = {
-			.arg_name = "finalized",
+			.arg_names = {"finalized", NULL},
 			.type_id = BOOLOID,
 			.default_val = (Datum)true,
 		},
 		[ContinuousViewOptionCompressSegmentBy] = {
-			 .arg_name = "compress_segmentby",
-			 .type_id = TEXTOID,
+			.arg_names = {"segmentby", "compress_segmentby", NULL},
+			.type_id = TEXTOID,
 		},
 		[ContinuousViewOptionCompressOrderBy] = {
-			 .arg_name = "compress_orderby",
+			.arg_names = {"orderby", "compress_orderby", NULL},
 			 .type_id = TEXTOID,
 		},
 		[ContinuousViewOptionCompressChunkTimeInterval] = {
-			 .arg_name = "compress_chunk_time_interval",
+			.arg_names = {"compress_chunk_time_interval", NULL},
 			 .type_id = INTERVALOID,
 		},
 };
@@ -127,7 +127,7 @@ ts_continuous_agg_get_compression_defelems(const WithClauseResult *with_clauses)
 		{
 			Node *value = (Node *) makeString(ts_with_clause_result_deparse_value(input));
 			DefElem *elem = makeDefElemExtended(EXTENSION_NAMESPACE,
-												(char *) def.arg_name,
+												(char *) def.arg_names[0],
 												value,
 												DEFELEM_UNSPEC,
 												-1);
diff --git a/src/with_clause_parser.c b/src/with_clause_parser.c
index 715c65a386c..790cb2dd718 100644
--- a/src/with_clause_parser.c
+++ b/src/with_clause_parser.c
@@ -86,20 +86,23 @@ ts_with_clauses_parse(const List *def_elems, const WithClauseDefinition *args, S
 
 		for (i = 0; i < nargs; i++)
 		{
-			if (pg_strcasecmp(def->defname, args[i].arg_name) == 0)
+			for (int j = 0; args[i].arg_names[j] != NULL; ++j)
 			{
-				argument_recognized = true;
-
-				if (!results[i].is_default)
-					ereport(ERROR,
-							(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
-							 errmsg("duplicate parameter \"%s.%s\"",
-									def->defnamespace,
-									def->defname)));
-
-				results[i].parsed = parse_arg(args[i], def);
-				results[i].is_default = false;
-				break;
+				if (pg_strcasecmp(def->defname, args[i].arg_names[j]) == 0)
+				{
+					argument_recognized = true;
+
+					if (!results[i].is_default)
+						ereport(ERROR,
+								(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
+								 errmsg("duplicate parameter \"%s.%s\"",
+										def->defnamespace,
+										def->defname)));
+
+					results[i].parsed = parse_arg(args[i], def);
+					results[i].is_default = false;
+					break;
+				}
 			}
 		}
 
diff --git a/src/with_clause_parser.h b/src/with_clause_parser.h
index 52e8fe2cdfb..b75bded9487 100644
--- a/src/with_clause_parser.h
+++ b/src/with_clause_parser.h
@@ -14,7 +14,9 @@
 
 typedef struct WithClauseDefinition
 {
-	const char *arg_name;
+	/* Alternative names for the parameters. The first one is the "main" one
+	 * when it comes to printouts.*/
+	const char *arg_names[5];
 	Oid type_id;
 	Datum default_val;
 } WithClauseDefinition;
diff --git a/test/expected/pg_dump.out b/test/expected/pg_dump.out
index 2245563d970..0f477faa982 100644
--- a/test/expected/pg_dump.out
+++ b/test/expected/pg_dump.out
@@ -571,18 +571,20 @@ WHERE   refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND
  _timescaledb_internal.compressed_chunk_stats
  _timescaledb_internal.hypertable_chunk_local_size
  timescaledb_experimental.policies
+ timescaledb_information.chunk_columnstore_settings
  timescaledb_information.chunk_compression_settings
  timescaledb_information.chunks
  timescaledb_information.compression_settings
  timescaledb_information.continuous_aggregates
  timescaledb_information.dimensions
+ timescaledb_information.hypertable_columnstore_settings
  timescaledb_information.hypertable_compression_settings
  timescaledb_information.hypertables
  timescaledb_information.job_errors
  timescaledb_information.job_history
  timescaledb_information.job_stats
  timescaledb_information.jobs
-(24 rows)
+(26 rows)
 
 -- Make sure we can't run our restoring functions as a normal perm user as that would disable functionality for the whole db
 \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
diff --git a/test/src/test_with_clause_parser.c b/test/src/test_with_clause_parser.c
index e4a637b87f2..7e5d8bd5a71 100644
--- a/test/src/test_with_clause_parser.c
+++ b/test/src/test_with_clause_parser.c
@@ -180,15 +180,15 @@ typedef enum TestArgs
 } TestArgs;
 
 static WithClauseDefinition test_args[] = {
-	[TestArgUnimpl] = { .arg_name = "unimplemented",
+	[TestArgUnimpl] = { .arg_names = {"unimplemented", NULL},
 						.type_id = InvalidOid, },
-	[TestArgBool] = { .arg_name = "bool", .type_id = BOOLOID, },
-	[TestArgInt32] = { .arg_name = "int32", .type_id = INT4OID, },
-	[TestArgDefault] = { .arg_name = "default",
+	[TestArgBool] = { .arg_names = {"bool", NULL}, .type_id = BOOLOID, },
+	[TestArgInt32] = { .arg_names = {"int32", NULL}, .type_id = INT4OID, },
+	[TestArgDefault] = { .arg_names = {"default", NULL},
 						 .type_id = INT4OID,
 						 .default_val = (Datum)-100 },
-	[TestArgName] = { .arg_name = "name", .type_id = NAMEOID, },
-	[TestArgRegclass] = { .arg_name = "regclass",
+	[TestArgName] = { .arg_names = {"name", NULL}, .type_id = NAMEOID, },
+	[TestArgRegclass] = { .arg_names = {"regclass", NULL},
 						  .type_id = REGCLASSOID },
 };
 
@@ -247,7 +247,7 @@ TS_TEST_FN(ts_test_with_clause_parse)
 	nulls = palloc(sizeof(*nulls) * funcctx->tuple_desc->natts);
 	memset(nulls, true, sizeof(*nulls) * funcctx->tuple_desc->natts);
 
-	values[0] = CStringGetTextDatum(test_args[result->i].arg_name);
+	values[0] = CStringGetTextDatum(test_args[result->i].arg_names[0]);
 	nulls[0] = false;
 	if (!result->parsed[result->i].is_default || result->i == TestArgDefault)
 	{
diff --git a/tsl/src/continuous_aggs/options.c b/tsl/src/continuous_aggs/options.c
index cefa73b0e45..ab333f79b33 100644
--- a/tsl/src/continuous_aggs/options.c
+++ b/tsl/src/continuous_aggs/options.c
@@ -142,7 +142,7 @@ cagg_alter_compression(ContinuousAgg *agg, Hypertable *mat_ht, List *compress_de
 				with_clause_options[i] = default_with_clause_options[i];
 				elog(NOTICE,
 					 "defaulting %s to %s",
-					 with_clause_options[i].definition->arg_name,
+					 with_clause_options[i].definition->arg_names[0],
 					 ts_with_clause_result_deparse_value(&with_clause_options[i]));
 			}
 		}
diff --git a/tsl/test/expected/columnstore_aliases.out b/tsl/test/expected/columnstore_aliases.out
new file mode 100644
index 00000000000..bdd25c31a6f
--- /dev/null
+++ b/tsl/test/expected/columnstore_aliases.out
@@ -0,0 +1,87 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+CREATE PROCEDURE
+       convert_hypertable_to_columnstore(regclass)
+LANGUAGE plpgsql AS $$
+DECLARE
+  chunk REGCLASS;
+BEGIN
+   FOR chunk IN SELECT show_chunks($1)
+   LOOP
+      CALL convert_to_columnstore(chunk);
+   END LOOP;
+END
+$$;
+CREATE PROCEDURE
+       convert_hypertable_to_rowstore(regclass)
+LANGUAGE plpgsql AS $$
+DECLARE
+  chunk REGCLASS;
+BEGIN
+   FOR chunk IN SELECT show_chunks($1)
+   LOOP
+      CALL convert_to_rowstore(chunk);
+   END LOOP;
+END
+$$;
+-- These are mostly taken from compression_ddl.sql and are only
+-- intended to check that aliases work. In that sense, the actual
+-- result of each query is not particularly important.
+CREATE TABLE test1 (ts timestamptz, i integer, b bigint, t text);
+SELECT * FROM create_hypertable('test1', 'ts');
+NOTICE:  adding not-null constraint to column "ts"
+ hypertable_id | schema_name | table_name | created 
+---------------+-------------+------------+---------
+             1 | public      | test1      | t
+(1 row)
+
+INSERT INTO test1 SELECT t,  random(), random(), random()::text
+FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-28 1:00', '1 hour') t;
+ALTER TABLE test1 set (
+      timescaledb.enable_columnstore,
+      timescaledb.segmentby = 'b',
+      timescaledb.orderby = 'ts desc'
+);
+CALL convert_hypertable_to_columnstore('test1');
+CALL convert_hypertable_to_rowstore('test1');
+CALL convert_hypertable_to_columnstore('test1');
+-- Pick one chunk to play with and test option names. We mostly use
+-- default since we are only interested in that the option names are
+-- accepted.
+SELECT chunk FROM show_chunks('test1') tbl(chunk) LIMIT 1 \gset
+CALL convert_to_rowstore(:'chunk', if_columnstore => true);
+CALL convert_to_columnstore(:'chunk',
+     if_not_columnstore => true,
+     recompress => false,
+     hypercore_use_access_method => false);
+CALL add_columnstore_policy('test1', interval '1 day');
+CALL remove_columnstore_policy('test1');
+SELECT * FROM timescaledb_information.hypertable_columnstore_settings;
+ hypertable | segmentby | orderby | compress_interval_length 
+------------+-----------+---------+--------------------------
+ test1      | b         | ts DESC | 
+(1 row)
+
+SELECT * FROM timescaledb_information.chunk_columnstore_settings ORDER BY chunk;
+ hypertable |                 chunk                  | segmentby | orderby 
+------------+----------------------------------------+-----------+---------
+ test1      | _timescaledb_internal._hyper_1_1_chunk | b         | ts DESC
+ test1      | _timescaledb_internal._hyper_1_2_chunk | b         | ts DESC
+ test1      | _timescaledb_internal._hyper_1_3_chunk | b         | ts DESC
+ test1      | _timescaledb_internal._hyper_1_4_chunk | b         | ts DESC
+(4 rows)
+
+VACUUM FULL test1;
+-- We only care about the column names for the result. They should be
+-- the same as for the original function.
+SELECT * FROM chunk_columnstore_stats('test1') where 1 = 2 order by chunk_name;
+ chunk_schema | chunk_name | compression_status | before_compression_table_bytes | before_compression_index_bytes | before_compression_toast_bytes | before_compression_total_bytes | after_compression_table_bytes | after_compression_index_bytes | after_compression_toast_bytes | after_compression_total_bytes | node_name 
+--------------+------------+--------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+-------------------------------+-------------------------------+-------------------------------+-------------------------------+-----------
+(0 rows)
+
+SELECT * FROM hypertable_columnstore_stats('test1') where 1 = 2;
+ total_chunks | number_compressed_chunks | before_compression_table_bytes | before_compression_index_bytes | before_compression_toast_bytes | before_compression_total_bytes | after_compression_table_bytes | after_compression_index_bytes | after_compression_toast_bytes | after_compression_total_bytes | node_name 
+--------------+--------------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+-------------------------------+-------------------------------+-------------------------------+-------------------------------+-----------
+(0 rows)
+
diff --git a/tsl/test/shared/expected/extension.out b/tsl/test/shared/expected/extension.out
index 0e70e20d99a..81c34114f72 100644
--- a/tsl/test/shared/expected/extension.out
+++ b/tsl/test/shared/expected/extension.out
@@ -210,6 +210,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  ts_hypercore_handler(internal)
  ts_hypercore_proxy_handler(internal)
  ts_now_mock()
+ add_columnstore_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,boolean)
  add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,boolean)
  add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text)
  add_dimension(regclass,_timescaledb_internal.dimension_info,boolean)
@@ -223,9 +224,12 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  by_hash(name,integer,regproc)
  by_range(name,anyelement,regproc)
  cagg_migrate(regclass,boolean,boolean)
+ chunk_columnstore_stats(regclass)
  chunk_compression_stats(regclass)
  chunks_detailed_size(regclass)
  compress_chunk(regclass,boolean,boolean,boolean)
+ convert_to_columnstore(regclass,boolean,boolean,boolean)
+ convert_to_rowstore(regclass,boolean)
  create_hypertable(regclass,_timescaledb_internal.dimension_info,boolean,boolean,boolean)
  create_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc,regproc)
  decompress_chunk(regclass,boolean)
@@ -239,6 +243,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  histogram(double precision,double precision,double precision,integer)
  hypertable_approximate_detailed_size(regclass)
  hypertable_approximate_size(regclass)
+ hypertable_columnstore_stats(regclass)
  hypertable_compression_stats(regclass)
  hypertable_detailed_size(regclass)
  hypertable_index_size(regclass)
@@ -253,6 +258,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  move_chunk(regclass,name,name,regclass,boolean)
  recompress_chunk(regclass,boolean)
  refresh_continuous_aggregate(regclass,"any","any")
+ remove_columnstore_policy(regclass,boolean)
  remove_compression_policy(regclass,boolean)
  remove_continuous_aggregate_policy(regclass,boolean,boolean)
  remove_reorder_policy(regclass,boolean)
diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt
index bae570319dc..35ab0747ed0 100644
--- a/tsl/test/sql/CMakeLists.txt
+++ b/tsl/test/sql/CMakeLists.txt
@@ -4,9 +4,9 @@ include(GenerateTestSchedule)
 # so unless you have a good reason, add new test files here.
 set(TEST_FILES
     agg_partials_pushdown.sql
-    bgw_security.sql
-    bgw_policy.sql
     bgw_job_ddl.sql
+    bgw_policy.sql
+    bgw_security.sql
     cagg_deprecated_bucket_ng.sql
     cagg_errors.sql
     cagg_invalidation.sql
@@ -14,24 +14,25 @@ set(TEST_FILES
     cagg_refresh.sql
     cagg_utils.sql
     cagg_watermark.sql
+    columnstore_aliases.sql
     compress_auto_sparse_index.sql
     compress_default.sql
     compress_dml_copy.sql
     compress_float8_corrupt.sql
-    compressed_detoaster.sql
     compressed_collation.sql
+    compressed_detoaster.sql
     compression.sql
-    compression_create_compressed_table.sql
     compression_conflicts.sql
+    compression_create_compressed_table.sql
     compression_defaults.sql
     compression_fks.sql
     compression_insert.sql
     compression_policy.sql
     compression_qualpushdown.sql
-    compression_settings.sql
     compression_sequence_num_removal.sql
-    compression_sorted_merge_distinct.sql
+    compression_settings.sql
     compression_sorted_merge_columns.sql
+    compression_sorted_merge_distinct.sql
     decompress_index.sql
     foreign_keys.sql
     move.sql
diff --git a/tsl/test/sql/columnstore_aliases.sql b/tsl/test/sql/columnstore_aliases.sql
new file mode 100644
index 00000000000..adebabf5888
--- /dev/null
+++ b/tsl/test/sql/columnstore_aliases.sql
@@ -0,0 +1,69 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+
+CREATE PROCEDURE
+       convert_hypertable_to_columnstore(regclass)
+LANGUAGE plpgsql AS $$
+DECLARE
+  chunk REGCLASS;
+BEGIN
+   FOR chunk IN SELECT show_chunks($1)
+   LOOP
+      CALL convert_to_columnstore(chunk);
+   END LOOP;
+END
+$$;
+
+CREATE PROCEDURE
+       convert_hypertable_to_rowstore(regclass)
+LANGUAGE plpgsql AS $$
+DECLARE
+  chunk REGCLASS;
+BEGIN
+   FOR chunk IN SELECT show_chunks($1)
+   LOOP
+      CALL convert_to_rowstore(chunk);
+   END LOOP;
+END
+$$;
+
+-- These are mostly taken from compression_ddl.sql and are only
+-- intended to check that aliases work. In that sense, the actual
+-- result of each query is not particularly important.
+CREATE TABLE test1 (ts timestamptz, i integer, b bigint, t text);
+SELECT * FROM create_hypertable('test1', 'ts');
+INSERT INTO test1 SELECT t,  random(), random(), random()::text
+FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-28 1:00', '1 hour') t;
+ALTER TABLE test1 set (
+      timescaledb.enable_columnstore,
+      timescaledb.segmentby = 'b',
+      timescaledb.orderby = 'ts desc'
+);
+
+CALL convert_hypertable_to_columnstore('test1');
+CALL convert_hypertable_to_rowstore('test1');
+CALL convert_hypertable_to_columnstore('test1');
+
+-- Pick one chunk to play with and test option names. We mostly use
+-- default since we are only interested in that the option names are
+-- accepted.
+SELECT chunk FROM show_chunks('test1') tbl(chunk) LIMIT 1 \gset
+CALL convert_to_rowstore(:'chunk', if_columnstore => true);
+CALL convert_to_columnstore(:'chunk',
+     if_not_columnstore => true,
+     recompress => false,
+     hypercore_use_access_method => false);
+
+CALL add_columnstore_policy('test1', interval '1 day');
+CALL remove_columnstore_policy('test1');
+
+SELECT * FROM timescaledb_information.hypertable_columnstore_settings;
+SELECT * FROM timescaledb_information.chunk_columnstore_settings ORDER BY chunk;
+
+VACUUM FULL test1;
+
+-- We only care about the column names for the result. They should be
+-- the same as for the original function.
+SELECT * FROM chunk_columnstore_stats('test1') where 1 = 2 order by chunk_name;
+SELECT * FROM hypertable_columnstore_stats('test1') where 1 = 2;

From 089a8304a58d516b3afb90462e7a4b12298baf61 Mon Sep 17 00:00:00 2001
From: Mats Kindahl <mats@timescale.com>
Date: Mon, 2 Dec 2024 09:09:38 +0100
Subject: [PATCH 5/8] Fix issue with PR approval workflow

Remove the event dump from the workflow and count the number of
matching files rather than testing for their presence. Also count
approvals from reviewers whose author association is "NONE".
---
 .github/workflows/pr-approvals.yaml | 32 +++++++++++++++++++----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/pr-approvals.yaml b/.github/workflows/pr-approvals.yaml
index 349b29638b8..32fa57001bb 100644
--- a/.github/workflows/pr-approvals.yaml
+++ b/.github/workflows/pr-approvals.yaml
@@ -25,27 +25,37 @@ jobs:
           GH_TOKEN: ${{ github.token }}
           PR_NUMBER: ${{ github.event.number }}
         run: |
-          echo "Event is: "
-          cat <<EOF
-            ${{ toJSON(github.event) }}
-          EOF
           echo "PR number is $PR_NUMBER"
           echo "$BODY" | egrep -qsi '^disable-check:.*\<approval-count\>'
           if [[ $? -ne 0 ]]; then
             # Get the list of modified files in this pull request
             echo "Modified files: "
             gh pr view $PR_NUMBER --json files
-            # Get modified files, but exclude those that are workflow
-            # files or are related to Hypercore table access
-            # method. These require only a single reviewer.
-            files=$(gh pr view $PR_NUMBER --json files --jq '.files.[].path | select(startswith(".github") or test("hypercore|columnar_scan") | not)')
+            # Get the number of modified files, but exclude those that
+            # are workflow files or are related to Hypercore table
+            # access method. These require only a single reviewer.
+            files=$(gh pr view $PR_NUMBER --json files --jq '[.files.[].path | select(startswith(".github") or test("hypercore|columnar_scan") | not)] | length')
 
             # Get the number of approvals in this pull request
             echo "Reviews: "
             gh pr view $PR_NUMBER --json reviews
-            approvals=$(gh pr view $PR_NUMBER --json reviews --jq '[.reviews.[] | select((.authorAssociation == "MEMBER" or .authorAssociation == "CONTRIBUTOR") and .state == "APPROVED")] | length')
-
-            if [[ $approvals -lt 2 ]] && [[ "${files}" ]] ; then
+            approvals=$(
+                gh pr view $PR_NUMBER --json reviews --jq '
+                    [
+                        .reviews.[]
+                        | select(
+                            (
+                                .authorAssociation == "NONE"
+                                or .authorAssociation == "MEMBER"
+                                or .authorAssociation == "CONTRIBUTOR"
+                            )
+                            and .state == "APPROVED"
+                          )
+                    ] | length
+                  '
+            )
+            echo "approvals: $approvals, files: $files"
+            if [[ $approvals -lt 2 ]] && [[ $files -gt 0 ]] ; then
               echo "This pull request requires 2 approvals before merging."
               echo
               echo "For trivial changes, you may disable this check by adding this trailer to the pull request message:"

From c6ad9a6be29f7b23f4edef52efbf2a74d3fc827c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?=
 <fabriziomello@gmail.com>
Date: Mon, 2 Dec 2024 12:24:05 -0300
Subject: [PATCH 6/8] Fix flaky jit regression test

Disable hash aggregation to pin the plan shape and avoid flaky test
output.
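
A minimal sketch of the stabilization, mirroring the SET added to the
test:

    -- Pin the plan: without this, the planner may flip between
    -- HashAggregate and Sort + GroupAggregate depending on cost estimates.
    SET enable_hashagg = off;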
---
 tsl/test/expected/jit.out | 20 ++++++++++++--------
 tsl/test/sql/jit.sql      |  1 +
 2 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/tsl/test/expected/jit.out b/tsl/test/expected/jit.out
index e9b1d3bcd48..8ea39bdf390 100644
--- a/tsl/test/expected/jit.out
+++ b/tsl/test/expected/jit.out
@@ -17,6 +17,7 @@ SET jit_above_cost=0;
 SET jit_inline_above_cost=0;
 SET jit_optimize_above_cost=0;
 SET jit_tuple_deforming=on;
+SET enable_hashagg=off;
 \ir :TEST_LOAD_NAME
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
@@ -198,16 +199,19 @@ SELECT * FROM jit_device_summary WHERE metric_spread = 1800 ORDER BY bucket DESC
                      Output: _hyper_4_6_chunk.bucket, _hyper_4_6_chunk.device_id, _hyper_4_6_chunk.metric_avg, _hyper_4_6_chunk.metric_spread
                      Index Cond: (_hyper_4_6_chunk.bucket < 'Mon Dec 31 01:00:00 2018 PST'::timestamp with time zone)
                      Filter: (_hyper_4_6_chunk.metric_spread = '1800'::double precision)
-               ->  HashAggregate
+               ->  GroupAggregate
                      Output: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id, avg(_hyper_3_5_chunk.metric), (max(_hyper_3_5_chunk.metric) - min(_hyper_3_5_chunk.metric))
-                     Group Key: time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time), _hyper_3_5_chunk.device_id
+                     Group Key: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id
                      Filter: ((max(_hyper_3_5_chunk.metric) - min(_hyper_3_5_chunk.metric)) = '1800'::double precision)
-                     ->  Result
-                           Output: time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time), _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
-                           ->  Index Scan using _hyper_3_5_chunk_jit_test_contagg_observation_time_idx on _timescaledb_internal._hyper_3_5_chunk
-                                 Output: _hyper_3_5_chunk.observation_time, _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
-                                 Index Cond: (_hyper_3_5_chunk.observation_time >= 'Mon Dec 31 01:00:00 2018 PST'::timestamp with time zone)
-(19 rows)
+                     ->  Sort
+                           Output: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
+                           Sort Key: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id
+                           ->  Result
+                                 Output: time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time), _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
+                                 ->  Index Scan using _hyper_3_5_chunk_jit_test_contagg_observation_time_idx on _timescaledb_internal._hyper_3_5_chunk
+                                       Output: _hyper_3_5_chunk.observation_time, _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
+                                       Index Cond: (_hyper_3_5_chunk.observation_time >= 'Mon Dec 31 01:00:00 2018 PST'::timestamp with time zone)
+(22 rows)
 
 -- generate the results into two different files
 \set ECHO errors
diff --git a/tsl/test/sql/jit.sql b/tsl/test/sql/jit.sql
index 4f6ade3a9c0..3b57d9c5740 100644
--- a/tsl/test/sql/jit.sql
+++ b/tsl/test/sql/jit.sql
@@ -20,6 +20,7 @@ SET jit_above_cost=0;
 SET jit_inline_above_cost=0;
 SET jit_optimize_above_cost=0;
 SET jit_tuple_deforming=on;
+SET enable_hashagg=off;
 
 \ir :TEST_LOAD_NAME
 \set PREFIX 'EXPLAIN (VERBOSE, TIMING OFF, COSTS OFF, SUMMARY OFF)'

From 02b76fad3811aa560f9098c64b4d4a27a6e1e14a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?=
 <fabriziomello@gmail.com>
Date: Tue, 26 Nov 2024 14:28:25 -0300
Subject: [PATCH 7/8] Don't invalidate relation cache if cagg watermark
 constify is false

It is not necessary to invalidate the relation cache during the
watermark update if the GUC `enable_cagg_watermark_constify` is set to
`false`. This was a leftover from when the feature was introduced in
#6325.
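
As an illustrative sketch (assuming the GUC is session-settable, like
other TimescaleDB enable_* GUCs):

    -- With constification disabled, prepared statements read the
    -- watermark at execution time, so no relcache-driven replan is needed.
    SET timescaledb.enable_cagg_watermark_constify = off;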
---
 src/ts_catalog/continuous_aggs_watermark.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/ts_catalog/continuous_aggs_watermark.c b/src/ts_catalog/continuous_aggs_watermark.c
index d7a9becb7b4..c022745f21c 100644
--- a/src/ts_catalog/continuous_aggs_watermark.c
+++ b/src/ts_catalog/continuous_aggs_watermark.c
@@ -17,6 +17,7 @@
 #include <utils/snapmgr.h>
 
 #include "debug_point.h"
+#include "guc.h"
 #include "hypertable.h"
 #include "ts_catalog/continuous_agg.h"
 #include "ts_catalog/continuous_aggs_watermark.h"
@@ -321,8 +322,10 @@ ts_cagg_watermark_update(Hypertable *mat_ht, int64 watermark, bool watermark_isn
 
 	/* If we have a real-time CAgg, it uses a watermark function. So, we have to invalidate the rel
 	 * cache to force a replanning of prepared statements. See cagg_watermark_update_internal for
-	 * more information. */
-	bool invalidate_rel_cache = !cagg->data.materialized_only;
+	 * more information. If the GUC enable_cagg_watermark_constify=false then it's not necessary
+	 * to invalidate relation cache. */
+	bool invalidate_rel_cache =
+		!cagg->data.materialized_only && ts_guc_enable_cagg_watermark_constify;
 
 	watermark = cagg_compute_watermark(cagg, watermark, watermark_isnull);
 	cagg_watermark_update_internal(mat_ht->fd.id,

From 8fe72417dca2800d330517e93f75e9633cb26300 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?=
 <fabriziomello@gmail.com>
Date: Mon, 2 Dec 2024 18:47:04 -0300
Subject: [PATCH 8/8] Fix flaky reorder regression test

Execute a manual `VACUUM` on the hypertable before reordering a chunk
so the number of removable row versions does not depend on autovacuum
timing, avoiding flaky test output.
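
The stabilized sequence from the test, as a sketch:

    DELETE FROM ct2 WHERE time < 2 OR val < 2;
    VACUUM ct2;  -- reclaim the deleted tuples right away
    -- reorder now consistently reports 0 removable row versions
    SELECT reorder_chunk('_timescaledb_internal._hyper_2_3_chunk', verbose => TRUE);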
---
 tsl/test/expected/reorder.out | 3 ++-
 tsl/test/sql/reorder.sql      | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/tsl/test/expected/reorder.out b/tsl/test/expected/reorder.out
index 9933a3f61e0..6b7257125ad 100644
--- a/tsl/test/expected/reorder.out
+++ b/tsl/test/expected/reorder.out
@@ -1273,9 +1273,10 @@ CREATE INDEX ct2_time_idx ON ct2(time DESC);
 CLUSTER ct2 USING ct2_time_idx;
 -- deleted chunks are removed correctly
 DELETE FROM ct2 where time < 2 OR val < 2;
+VACUUM ct2;
 SELECT reorder_chunk('_timescaledb_internal._hyper_2_3_chunk', verbose => TRUE);
 INFO:  reordering "_timescaledb_internal._hyper_2_3_chunk" using sequential scan and sort
-INFO:  "_hyper_2_3_chunk": found 2 removable, 3 nonremovable row versions in 1 pages
+INFO:  "_hyper_2_3_chunk": found 0 removable, 3 nonremovable row versions in 1 pages
  reorder_chunk 
 ---------------
  
diff --git a/tsl/test/sql/reorder.sql b/tsl/test/sql/reorder.sql
index 768c2b5b0f7..dda69fafc2d 100644
--- a/tsl/test/sql/reorder.sql
+++ b/tsl/test/sql/reorder.sql
@@ -200,6 +200,7 @@ CLUSTER ct2 USING ct2_time_idx;
 
 -- deleted chunks are removed correctly
 DELETE FROM ct2 where time < 2 OR val < 2;
+VACUUM ct2;
 
 SELECT reorder_chunk('_timescaledb_internal._hyper_2_3_chunk', verbose => TRUE);
 SELECT ctid, time, val FROM _timescaledb_internal._hyper_2_3_chunk ORDER BY time;