diff --git a/.unreleased/feature_5761 b/.unreleased/feature_5761 index cb1cc69d105..5bef1c63a1b 100644 --- a/.unreleased/feature_5761 +++ b/.unreleased/feature_5761 @@ -1 +1,2 @@ Implements: #5761 Simplify hypertable DDL API +Thanks: @pdipesh02 for contributing to this feature diff --git a/sql/ddl_api.sql b/sql/ddl_api.sql index 36c565c5ca3..a726788b6c6 100644 --- a/sql/ddl_api.sql +++ b/sql/ddl_api.sql @@ -62,6 +62,25 @@ CREATE OR REPLACE FUNCTION @extschema@.create_distributed_hypertable( data_nodes NAME[] = NULL ) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_distributed_create' LANGUAGE C VOLATILE; +-- A generalized hypertable creation API that can be used to convert a PostgreSQL table +-- with TIME/SERIAL/BIGSERIAL columns to a hypertable. +-- +-- relation - The OID of the table to be converted +-- dimension - The partitioning dimension for the table, built with the +-- ByRange (range/time partitioning) or ByHash (hash partitioning) +-- dimension builder functions +-- create_default_indexes (Optional) Whether or not to create the default indexes +-- if_not_exists (Optional) Do not fail if table is already a hypertable +-- migrate_data (Optional) Set to true to migrate any existing data in the table to chunks +CREATE OR REPLACE FUNCTION @extschema@.create_hypertable( + relation REGCLASS, + dimension _timescaledb_internal.dimension_info, + create_default_indexes BOOLEAN = TRUE, + if_not_exists BOOLEAN = FALSE, + migrate_data BOOLEAN = FALSE ) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; + + -- Set adaptive chunking. To disable, set chunk_target_size => 'off'. 
CREATE OR REPLACE FUNCTION @extschema@.set_adaptive_chunking( hypertable REGCLASS, @@ -70,7 +89,7 @@ CREATE OR REPLACE FUNCTION @extschema@.set_adaptive_chunking( OUT chunk_target_size BIGINT ) RETURNS RECORD AS '@MODULE_PATHNAME@', 'ts_chunk_adaptive_set' LANGUAGE C VOLATILE; --- Update chunk_time_interval for a hypertable. +-- Update chunk_time_interval for a hypertable [DEPRECATED]. -- -- hypertable - The OID of the table corresponding to a hypertable whose time -- interval should be updated @@ -84,6 +103,20 @@ CREATE OR REPLACE FUNCTION @extschema@.set_chunk_time_interval( dimension_name NAME = NULL ) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; +-- Update partition_interval for a hypertable. +-- +-- hypertable - The OID of the table corresponding to a hypertable whose +-- partition interval should be updated +-- partition_interval - The new interval. For hypertables with integral/serial/bigserial +-- time columns, this must be an integral type. For hypertables with a +-- TIMESTAMP/TIMESTAMPTZ/DATE type, it can be integral which is treated as +-- microseconds, or an INTERVAL type. 
+CREATE OR REPLACE FUNCTION @extschema@.set_partitioning_interval( + hypertable REGCLASS, + partition_interval ANYELEMENT, + dimension_name NAME = NULL +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; + CREATE OR REPLACE FUNCTION @extschema@.set_number_partitions( hypertable REGCLASS, number_partitions INTEGER, @@ -109,12 +142,23 @@ CREATE OR REPLACE FUNCTION @extschema@.show_chunks( ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks' LANGUAGE C STABLE PARALLEL SAFE; --- Add a dimension (of partitioning) to a hypertable +CREATE OR REPLACE FUNCTION @extschema@.ByHash(column_name NAME, number_partitions INTEGER, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info + LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_hash_dimension'; + +CREATE OR REPLACE FUNCTION @extschema@.ByRange(column_name NAME, + partition_interval ANYELEMENT = NULL::bigint, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info + LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_range_dimension'; + +-- Add a dimension (of partitioning) to a hypertable [DEPRECATED] -- -- hypertable - OID of the table to add a dimension to -- column_name - NAME of the column to use in partitioning for this dimension -- number_partitions - Number of partitions, for non-time dimensions --- interval_length - Size of intervals for time dimensions (can be integral or INTERVAL) +-- chunk_time_interval - Size of intervals for time dimensions (can be integral or INTERVAL) -- partitioning_func - Function used to partition the column -- if_not_exists - If set, and the dimension already exists, generate a notice instead of an error CREATE OR REPLACE FUNCTION @extschema@.add_dimension( @@ -127,6 +171,18 @@ CREATE OR REPLACE FUNCTION @extschema@.add_dimension( ) RETURNS TABLE(dimension_id INT, schema_name NAME, table_name NAME, column_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_dimension_add' LANGUAGE C VOLATILE; +-- Add a 
dimension (of partitioning) to a hypertable. +-- +-- hypertable - OID of the table to add a dimension to +-- dimension - Dimension to add +-- if_not_exists - If set, and the dimension already exists, generate a notice instead of an error +CREATE OR REPLACE FUNCTION @extschema@.add_dimension( + hypertable REGCLASS, + dimension _timescaledb_internal.dimension_info, + if_not_exists BOOLEAN = FALSE +) RETURNS TABLE(dimension_id INT, created BOOL) +AS '@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE; + CREATE OR REPLACE FUNCTION @extschema@.attach_tablespace( tablespace NAME, hypertable REGCLASS, @@ -166,7 +222,7 @@ CREATE OR REPLACE FUNCTION @extschema@.delete_data_node( if_exists BOOLEAN = FALSE, force BOOLEAN = FALSE, repartition BOOLEAN = TRUE, - drop_database BOOLEAN = FALSE + drop_database BOOLEAN = FALSE ) RETURNS BOOLEAN AS '@MODULE_PATHNAME@', 'ts_data_node_delete' LANGUAGE C VOLATILE; -- Attach a data node to a distributed hypertable @@ -185,7 +241,7 @@ CREATE OR REPLACE FUNCTION @extschema@.detach_data_node( if_attached BOOLEAN = FALSE, force BOOLEAN = FALSE, repartition BOOLEAN = TRUE, - drop_remote_data BOOLEAN = FALSE + drop_remote_data BOOLEAN = FALSE ) RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_data_node_detach' LANGUAGE C VOLATILE; @@ -222,6 +278,6 @@ CREATE OR REPLACE FUNCTION @extschema@.alter_data_node( host TEXT = NULL, database NAME = NULL, port INTEGER = NULL, - available BOOLEAN = NULL + available BOOLEAN = NULL ) RETURNS TABLE(node_name NAME, host TEXT, port INTEGER, database NAME, available BOOLEAN) AS '@MODULE_PATHNAME@', 'ts_data_node_alter' LANGUAGE C VOLATILE; diff --git a/sql/ddl_experimental.sql b/sql/ddl_experimental.sql index 3601077083e..427b4ee91c8 100644 --- a/sql/ddl_experimental.sql +++ b/sql/ddl_experimental.sql @@ -47,55 +47,3 @@ CREATE OR REPLACE FUNCTION timescaledb_experimental.subscription_exec( CREATE OR REPLACE PROCEDURE timescaledb_experimental.cleanup_copy_chunk_operation( operation_id NAME) AS 
'@MODULE_PATHNAME@', 'ts_copy_chunk_cleanup_proc' LANGUAGE C; - --- A generalized hypertable creation API that can be used to convert a PostgreSQL table --- with TIME/SERIAL/BIGSERIAL columns to a hypertable. --- --- relation - The OID of the table to be converted --- partition_column - Name of the partition column --- partition_interval (Optional) Initial interval for the chunks --- partition_func (Optional) Partitioning function to be used for partition column --- create_default_indexes (Optional) Whether or not to create the default indexes --- if_not_exists (Optional) Do not fail if table is already a hypertable --- migrate_data (Optional) Set to true to migrate any existing data in the table to chunks -CREATE OR REPLACE FUNCTION timescaledb_experimental.create_hypertable( - relation REGCLASS, - partition_column NAME, - partition_interval ANYELEMENT = NULL::BIGINT, - partition_func REGPROC = NULL, - create_default_indexes BOOLEAN = TRUE, - if_not_exists BOOLEAN = FALSE, - migrate_data BOOLEAN = FALSE -) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; - --- Add a dimension (of partitioning) to a hypertable --- --- hypertable - OID of the table to add a dimension to --- column_name - NAME of the column to use in partitioning for this dimension --- number_partitions - Number of partitions, for closed dimensions --- partition_interval - Size of intervals for open dimensions (can be integral or INTERVAL) --- partition_func - Function used to partition the column --- if_not_exists - If set, and the dimension already exists, generate a notice instead of an error -CREATE OR REPLACE FUNCTION timescaledb_experimental.add_dimension( - hypertable REGCLASS, - column_name NAME, - number_partitions INTEGER = NULL, - partition_interval ANYELEMENT = NULL::BIGINT, - partition_func REGPROC = NULL, - if_not_exists BOOLEAN = FALSE -) RETURNS TABLE(dimension_id INT, created BOOL) -AS '@MODULE_PATHNAME@', 
'ts_dimension_add_general' LANGUAGE C VOLATILE; - --- Update partition_interval for a hypertable. --- --- hypertable - The OID of the table corresponding to a hypertable whose --- partition interval should be updated --- partition_interval - The new interval. For hypertables with integral/serial/bigserial --- time columns, this must be an integral type. For hypertables with a --- TIMESTAMP/TIMESTAMPTZ/DATE type, it can be integral which is treated as --- microseconds, or an INTERVAL type. -CREATE OR REPLACE FUNCTION timescaledb_experimental.set_partitioning_interval( - hypertable REGCLASS, - partition_interval ANYELEMENT, - dimension_name NAME = NULL -) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; diff --git a/sql/partitioning.sql b/sql/partitioning.sql index 5637ecbd32e..8a7c6de555c 100644 --- a/sql/partitioning.sql +++ b/sql/partitioning.sql @@ -10,4 +10,3 @@ CREATE OR REPLACE FUNCTION _timescaledb_functions.get_partition_for_key(val anye CREATE OR REPLACE FUNCTION _timescaledb_functions.get_partition_hash(val anyelement) RETURNS int AS '@MODULE_PATHNAME@', 'ts_get_partition_hash' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; - diff --git a/sql/pre_install/types.functions.sql b/sql/pre_install/types.functions.sql index b21d1075c15..6bd7dfef2b3 100644 --- a/sql/pre_install/types.functions.sql +++ b/sql/pre_install/types.functions.sql @@ -40,3 +40,11 @@ CREATE OR REPLACE FUNCTION _timescaledb_functions.rxid_in(cstring) RETURNS @exts CREATE OR REPLACE FUNCTION _timescaledb_functions.rxid_out(@extschema@.rxid) RETURNS cstring AS '@MODULE_PATHNAME@', 'ts_remote_txn_id_out' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring) + RETURNS _timescaledb_internal.dimension_info + AS '@MODULE_PATHNAME@', 'ts_dimension_info_in' LANGUAGE C STRICT IMMUTABLE; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info) + 
RETURNS cstring + AS '@MODULE_PATHNAME@', 'ts_dimension_info_out' LANGUAGE C STRICT IMMUTABLE; diff --git a/sql/pre_install/types.post.sql b/sql/pre_install/types.post.sql index a222c37f776..3f2fc9b78d4 100644 --- a/sql/pre_install/types.post.sql +++ b/sql/pre_install/types.post.sql @@ -23,3 +23,14 @@ CREATE TYPE @extschema@.rxid ( input = _timescaledb_functions.rxid_in, output = _timescaledb_functions.rxid_out ); + +-- +-- Dimension type used in create_hypertable, add_dimension, etc. It is +-- deliberately an opaque type. +-- +CREATE TYPE _timescaledb_internal.dimension_info ( + INPUT = _timescaledb_functions.dimension_info_in, + OUTPUT = _timescaledb_functions.dimension_info_out, + INTERNALLENGTH = VARIABLE +); + diff --git a/sql/pre_install/types.pre.sql b/sql/pre_install/types.pre.sql index 43033ae6cf8..ccbde4ad1e7 100644 --- a/sql/pre_install/types.pre.sql +++ b/sql/pre_install/types.pre.sql @@ -12,4 +12,10 @@ CREATE TYPE _timescaledb_internal.compressed_data; -- CREATE TYPE @extschema@.rxid; +-- +-- Dimension type used in create_hypertable, add_dimension, etc. It is +-- deliberately an opaque type. 
+-- +CREATE TYPE _timescaledb_internal.dimension_info; + --placeholder to allow creation of functions below diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index 23404fe35bd..5174ca5efb9 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -1,25 +1,30 @@ +CREATE FUNCTION @extschema@.ByHash(column_name NAME, number_partitions INTEGER, + partition_func regproc = NULL) +RETURNS _timescaledb_internal.dimension_info LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_hash_dimension'; + +CREATE FUNCTION @extschema@.ByRange(column_name NAME, + partition_interval ANYELEMENT = NULL::bigint, + partition_func regproc = NULL) +RETURNS _timescaledb_internal.dimension_info LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_range_dimension'; + -- API changes related to hypertable generalization -CREATE OR REPLACE FUNCTION timescaledb_experimental.create_hypertable( +CREATE FUNCTION @extschema@.create_hypertable( relation REGCLASS, - partition_column NAME, - partition_interval ANYELEMENT = NULL::BIGINT, - partition_func REGPROC = NULL, + dimension _timescaledb_internal.dimension_info, create_default_indexes BOOLEAN = TRUE, if_not_exists BOOLEAN = FALSE, migrate_data BOOLEAN = FALSE -) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; +) RETURNS TABLE(hypertable_id INT, created BOOL) +AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; -CREATE OR REPLACE FUNCTION timescaledb_experimental.add_dimension( +CREATE FUNCTION @extschema@.add_dimension( hypertable REGCLASS, - column_name NAME, - number_partitions INTEGER = NULL, - partition_interval ANYELEMENT = NULL::BIGINT, - partition_func REGPROC = NULL, + dimension _timescaledb_internal.dimension_info, if_not_exists BOOLEAN = FALSE ) RETURNS TABLE(dimension_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE; -CREATE OR REPLACE FUNCTION 
timescaledb_experimental.set_partitioning_interval( +CREATE FUNCTION @extschema@.set_partitioning_interval( hypertable REGCLASS, partition_interval ANYELEMENT, dimension_name NAME = NULL diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index f3695a4944b..258964ebf62 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -273,7 +273,169 @@ ALTER FUNCTION _timescaledb_functions.finalize_agg_sfunc(internal,text,name,name ALTER FUNCTION _timescaledb_functions.partialize_agg(anyelement) SET SCHEMA _timescaledb_internal; ALTER AGGREGATE _timescaledb_functions.finalize_agg(text,name,name,name[][],bytea,anyelement) SET SCHEMA _timescaledb_internal; --- API changes related to hypertable generalization -DROP FUNCTION IF EXISTS timescaledb_experimental.create_hypertable; -DROP FUNCTION IF EXISTS timescaledb_experimental.add_dimension; -DROP FUNCTION IF EXISTS timescaledb_experimental.set_partitioning_interval; +DROP FUNCTION _timescaledb_functions.hypertable_osm_range_update(regclass, anyelement, anyelement, boolean); + +-- recreate the _timescaledb_catalog.hypertable table as new field was added +-- 1. 
drop CONSTRAINTS from other tables referencing the existing one +ALTER TABLE _timescaledb_config.bgw_job + DROP CONSTRAINT bgw_job_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk + DROP CONSTRAINT chunk_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_index + DROP CONSTRAINT chunk_index_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_agg + DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey, + DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + DROP CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.dimension + DROP CONSTRAINT dimension_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable + DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable_compression + DROP CONSTRAINT hypertable_compression_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable_data_node + DROP CONSTRAINT hypertable_data_node_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.tablespace + DROP CONSTRAINT tablespace_hypertable_id_fkey; + +-- drop dependent views +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.hypertables; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.job_stats; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.jobs; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.continuous_aggregates; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.chunks; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.dimensions; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.compression_settings; +ALTER EXTENSION timescaledb DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; +ALTER EXTENSION timescaledb DROP VIEW 
_timescaledb_internal.compressed_chunk_stats; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_experimental.chunk_replication_status; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_experimental.policies; + +DROP VIEW timescaledb_information.hypertables; +DROP VIEW timescaledb_information.job_stats; +DROP VIEW timescaledb_information.jobs; +DROP VIEW timescaledb_information.continuous_aggregates; +DROP VIEW timescaledb_information.chunks; +DROP VIEW timescaledb_information.dimensions; +DROP VIEW timescaledb_information.compression_settings; +DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; +DROP VIEW _timescaledb_internal.compressed_chunk_stats; +DROP VIEW timescaledb_experimental.chunk_replication_status; +DROP VIEW timescaledb_experimental.policies; + +-- recreate table +CREATE TABLE _timescaledb_catalog.hypertable_tmp AS SELECT * FROM _timescaledb_catalog.hypertable; +CREATE TABLE _timescaledb_catalog.tmp_hypertable_seq_value AS SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq; + +SET timescaledb.restoring = on; -- must disable the hooks otherwise we can't do anything without the table _timescaledb_catalog.hypertable + +DROP TABLE _timescaledb_catalog.hypertable; + +CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1; +SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_catalog.tmp_hypertable_seq_value; +DROP TABLE _timescaledb_catalog.tmp_hypertable_seq_value; + +CREATE TABLE _timescaledb_catalog.hypertable ( + id INTEGER PRIMARY KEY NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'), + schema_name name NOT NULL, + table_name name NOT NULL, + associated_schema_name name NOT NULL, + associated_table_prefix name NOT NULL, + num_dimensions smallint NOT NULL, + chunk_sizing_func_schema name NOT 
NULL, + chunk_sizing_func_name name NOT NULL, + chunk_target_size bigint NOT NULL, -- size in bytes + compression_state smallint NOT NULL DEFAULT 0, + compressed_hypertable_id integer, + replication_factor smallint NULL +); + +SET timescaledb.restoring = off; + +INSERT INTO _timescaledb_catalog.hypertable ( + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id, + replication_factor +) +SELECT + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id, + replication_factor +FROM + _timescaledb_catalog.hypertable_tmp +ORDER BY id; + +ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', 'WHERE id >= 1'); +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', ''); + +GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC; + +DROP TABLE _timescaledb_catalog.hypertable_tmp; +-- now add any constraints +ALTER TABLE _timescaledb_catalog.hypertable + -- ADD CONSTRAINT hypertable_pkey PRIMARY KEY (id), + ADD CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix), + ADD CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name), + ADD CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'), + -- internal compressed hypertables have compression state = 2 + ADD CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2), + ADD CONSTRAINT 
hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0), + ADD CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)), + -- replication_factor NULL: regular hypertable + -- replication_factor > 0: distributed hypertable on access node + -- replication_factor -1: distributed hypertable on data node, which is part of a larger table + ADD CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1), + ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); + +GRANT SELECT ON TABLE _timescaledb_catalog.hypertable TO PUBLIC; + +-- 3. reestablish constraints on other tables +ALTER TABLE _timescaledb_config.bgw_job + ADD CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk + ADD CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE _timescaledb_catalog.chunk_index + ADD CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_agg + ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE, + ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + ADD CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE 
_timescaledb_catalog.continuous_aggs_invalidation_threshold + ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.dimension + ADD CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_compression + ADD CONSTRAINT hypertable_compression_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_data_node + ADD CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE _timescaledb_catalog.tablespace + ADD CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; + + diff --git a/src/dimension.c b/src/dimension.c index e57cb1b3ee4..54c54ee92aa 100644 --- a/src/dimension.c +++ b/src/dimension.c @@ -73,6 +73,11 @@ cmp_dimension_id(const void *left, const void *right) return 0; } +TS_FUNCTION_INFO_V1(ts_hash_dimension); +TS_FUNCTION_INFO_V1(ts_range_dimension); +PG_FUNCTION_INFO_V1(ts_dimension_info_in); +PG_FUNCTION_INFO_V1(ts_dimension_info_out); + const Dimension * ts_hyperspace_get_dimension_by_id(const Hyperspace *hs, int32 id) { @@ -1303,6 +1308,23 @@ ts_dimension_set_interval(PG_FUNCTION_ARGS) TS_PREVENT_FUNC_IF_READ_ONLY(); +#if 0 +{ + HeapTuple tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(fcinfo->flinfo->fn_oid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for function %u", fcinfo->flinfo->fn_oid); + Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(tuple); + if (strcmp(NameStr(proc->proname), "set_chunk_time_interval") == 0) + ereport(WARNING, + (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE), + errmsg("function %s is 
deprecated", NameStr(proc->proname)), + errdetail("Function %s is deprecated and will be removed in a future version.", + NameStr(proc->proname)), + errhint("Use \"set_partitioning_interval\" instead."))); + ReleaseSysCache(tuple); + } +#endif + if (PG_ARGISNULL(0)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("hypertable cannot be NULL"))); @@ -1331,11 +1353,11 @@ ts_dimension_info_create_open(Oid table_relid, Name column_name, Datum interval, *info = (DimensionInfo){ .type = DIMENSION_TYPE_OPEN, .table_relid = table_relid, - .colname = column_name, .interval_datum = interval, .interval_type = interval_type, .partitioning_func = partitioning_func, }; + namestrcpy(&info->colname, NameStr(*column_name)); return info; } @@ -1347,11 +1369,11 @@ ts_dimension_info_create_closed(Oid table_relid, Name column_name, int32 num_sli *info = (DimensionInfo){ .type = DIMENSION_TYPE_CLOSED, .table_relid = table_relid, - .colname = column_name, .num_slices = num_slices, - .num_slices_is_set = true, + .num_slices_is_set = (num_slices > 0), .partitioning_func = partitioning_func, }; + namestrcpy(&info->colname, NameStr(*column_name)); return info; } @@ -1377,7 +1399,7 @@ dimension_info_validate_open(DimensionInfo *info) dimtype = get_func_rettype(info->partitioning_func); } - info->interval = dimension_interval_to_internal(NameStr(*info->colname), + info->interval = dimension_interval_to_internal(NameStr(info->colname), dimtype, info->interval_type, info->interval_datum, @@ -1404,7 +1426,7 @@ dimension_info_validate_closed(DimensionInfo *info) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid number of partitions for dimension \"%s\"", - NameStr(*info->colname)), + NameStr(info->colname)), errhint("A closed (space) dimension must specify between 1 and %d partitions.", PG_INT16_MAX))); } @@ -1428,12 +1450,12 @@ ts_dimension_info_validate(DimensionInfo *info) errmsg("cannot specify both the number of partitions and an interval"))); /* Check 
that the column exists and get its NOT NULL status */ - tuple = SearchSysCacheAttName(info->table_relid, NameStr(*info->colname)); + tuple = SearchSysCacheAttName(info->table_relid, NameStr(info->colname)); if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" does not exist", NameStr(*info->colname)))); + errmsg("column \"%s\" does not exist", NameStr(info->colname)))); datum = SysCacheGetAttr(ATTNAME, tuple, Anum_pg_attribute_atttypid, &isnull); Assert(!isnull); @@ -1463,21 +1485,21 @@ ts_dimension_info_validate(DimensionInfo *info) /* Check if the dimension already exists */ dim = ts_hyperspace_get_dimension_by_name(info->ht->space, DIMENSION_TYPE_ANY, - NameStr(*info->colname)); + NameStr(info->colname)); if (NULL != dim) { if (!info->if_not_exists) ereport(ERROR, (errcode(ERRCODE_TS_DUPLICATE_DIMENSION), - errmsg("column \"%s\" is already a dimension", NameStr(*info->colname)))); + errmsg("column \"%s\" is already a dimension", NameStr(info->colname)))); info->dimension_id = dim->fd.id; info->skip = true; ereport(NOTICE, (errmsg("column \"%s\" is already a dimension, skipping", - NameStr(*info->colname)))); + NameStr(info->colname)))); return; } } @@ -1500,12 +1522,12 @@ int32 ts_dimension_add_from_info(DimensionInfo *info) { if (info->set_not_null && info->type == DIMENSION_TYPE_OPEN) - dimension_add_not_null_on_column(info->table_relid, NameStr(*info->colname)); + dimension_add_not_null_on_column(info->table_relid, NameStr(info->colname)); Assert(info->ht != NULL); info->dimension_id = dimension_insert(info->ht->fd.id, - info->colname, + &info->colname, info->coltype, info->num_slices, info->partitioning_func, @@ -1536,6 +1558,7 @@ dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_gen Datum values[Natts_generic_add_dimension]; bool nulls[Natts_generic_add_dimension] = { false }; + Assert(tupdesc->natts == Natts_generic_add_dimension); 
values[AttrNumberGetAttrOffset(Anum_generic_add_dimension_id)] = info->dimension_id; values[AttrNumberGetAttrOffset(Anum_generic_add_dimension_created)] = BoolGetDatum(!info->skip); @@ -1546,13 +1569,14 @@ dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_gen Datum values[Natts_add_dimension]; bool nulls[Natts_add_dimension] = { false }; + Assert(tupdesc->natts == Natts_add_dimension); values[AttrNumberGetAttrOffset(Anum_add_dimension_id)] = info->dimension_id; values[AttrNumberGetAttrOffset(Anum_add_dimension_schema_name)] = NameGetDatum(&info->ht->fd.schema_name); values[AttrNumberGetAttrOffset(Anum_add_dimension_table_name)] = NameGetDatum(&info->ht->fd.table_name); values[AttrNumberGetAttrOffset(Anum_add_dimension_column_name)] = - NameGetDatum(info->colname); + NameGetDatum(&info->colname); values[AttrNumberGetAttrOffset(Anum_add_dimension_created)] = BoolGetDatum(!info->skip); tuple = heap_form_tuple(tupdesc, values, nulls); } @@ -1572,34 +1596,19 @@ dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_gen * 5. IF NOT EXISTS option (bool) */ static Datum -ts_dimension_add_internal(PG_FUNCTION_ARGS, bool is_generic) +ts_dimension_add_internal(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_generic) { Cache *hcache; - DimensionInfo info = { - .type = PG_ARGISNULL(2) ? DIMENSION_TYPE_OPEN : DIMENSION_TYPE_CLOSED, - .table_relid = PG_GETARG_OID(0), - .colname = PG_ARGISNULL(1) ? NULL : PG_GETARG_NAME(1), - .num_slices = PG_ARGISNULL(2) ? DatumGetInt32(-1) : PG_GETARG_INT32(2), - .num_slices_is_set = !PG_ARGISNULL(2), - .interval_datum = PG_ARGISNULL(3) ? Int32GetDatum(-1) : PG_GETARG_DATUM(3), - .interval_type = PG_ARGISNULL(3) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 3), - .partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4), - .if_not_exists = PG_ARGISNULL(5) ? 
false : PG_GETARG_BOOL(5), - }; Datum retval = 0; - TS_PREVENT_FUNC_IF_READ_ONLY(); - - if (PG_ARGISNULL(0)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("hypertable cannot be NULL"))); + Assert(DIMENSION_INFO_IS_SET(info)); - if (!info.num_slices_is_set && !OidIsValid(info.interval_type)) + if (!DIMENSION_INFO_IS_VALID(info)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("must specify either the number of partitions or an interval"))); - ts_hypertable_permissions_check(info.table_relid, GetUserId()); + ts_hypertable_permissions_check(info->table_relid, GetUserId()); /* * The hypertable catalog table has a CHECK(num_dimensions > 0), which @@ -1611,25 +1620,25 @@ ts_dimension_add_internal(PG_FUNCTION_ARGS, bool is_generic) * This lock is also used to serialize access from concurrent add_dimension() * call and a chunk creation. */ - LockRelationOid(info.table_relid, ShareUpdateExclusiveLock); + LockRelationOid(info->table_relid, ShareUpdateExclusiveLock); DEBUG_WAITPOINT("add_dimension_ht_lock"); - info.ht = ts_hypertable_cache_get_cache_and_entry(info.table_relid, CACHE_FLAG_NONE, &hcache); + info->ht = ts_hypertable_cache_get_cache_and_entry(info->table_relid, CACHE_FLAG_NONE, &hcache); - if (info.num_slices_is_set && OidIsValid(info.interval_type)) + if (info->num_slices_is_set && OidIsValid(info->interval_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot specify both the number of partitions and an interval"))); - if (!info.num_slices_is_set && !OidIsValid(info.interval_type)) + if (!info->num_slices_is_set && !OidIsValid(info->interval_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot omit both the number of partitions and the interval"))); - ts_dimension_info_validate(&info); + ts_dimension_info_validate(info); - if (!info.skip) + if (!info->skip) { int32 dimension_id; @@ -1638,21 +1647,21 @@ ts_dimension_add_internal(PG_FUNCTION_ARGS, bool is_generic) * 
dimension rows and not the num_dimensions in the hypertable catalog * table. */ - ts_hypertable_set_num_dimensions(info.ht, info.ht->space->num_dimensions + 1); - dimension_id = ts_dimension_add_from_info(&info); + ts_hypertable_set_num_dimensions(info->ht, info->ht->space->num_dimensions + 1); + dimension_id = ts_dimension_add_from_info(info); /* If adding the first space dimension, also add dimension partition metadata */ - if (info.type == DIMENSION_TYPE_CLOSED) + if (info->type == DIMENSION_TYPE_CLOSED) { - const Dimension *space_dim = hyperspace_get_closed_dimension(info.ht->space, 0); + const Dimension *space_dim = hyperspace_get_closed_dimension(info->ht->space, 0); if (space_dim != NULL) { - List *data_nodes = ts_hypertable_get_available_data_nodes(info.ht, false); + List *data_nodes = ts_hypertable_get_available_data_nodes(info->ht, false); ts_dimension_partition_info_recreate(dimension_id, - info.num_slices, + info->num_slices, data_nodes, - info.ht->fd.replication_factor); + info->ht->fd.replication_factor); } } @@ -1663,11 +1672,11 @@ ts_dimension_add_internal(PG_FUNCTION_ARGS, bool is_generic) * does not reflect the changes in the previous 2 lines which add a * new dimension */ - info.ht = ts_hypertable_get_by_id(info.ht->fd.id); - ts_indexing_verify_indexes(info.ht); + info->ht = ts_hypertable_get_by_id(info->ht->fd.id); + ts_indexing_verify_indexes(info->ht); /* Check that partitioning is sane */ - ts_hypertable_check_partitioning(info.ht, dimension_id); + ts_hypertable_check_partitioning(info->ht, dimension_id); /* * If the hypertable has chunks, to make it compatible @@ -1677,10 +1686,10 @@ ts_dimension_add_internal(PG_FUNCTION_ARGS, bool is_generic) * Newly created chunks will have a proper slice range according to * the created dimension and its partitioning. 
*/ - if (ts_hypertable_has_chunks(info.table_relid, AccessShareLock)) + if (ts_hypertable_has_chunks(info->table_relid, AccessShareLock)) { ListCell *lc; - List *chunk_id_list = ts_chunk_get_chunk_ids_by_hypertable_id(info.ht->fd.id); + List *chunk_id_list = ts_chunk_get_chunk_ids_by_hypertable_id(info->ht->fd.id); DimensionSlice *slice; slice = ts_dimension_slice_create(dimension_id, @@ -1702,9 +1711,9 @@ ts_dimension_add_internal(PG_FUNCTION_ARGS, bool is_generic) } } - ts_hypertable_func_call_on_data_nodes(info.ht, fcinfo); + ts_hypertable_func_call_on_data_nodes(info->ht, fcinfo); - retval = dimension_create_datum(fcinfo, &info, is_generic); + retval = dimension_create_datum(fcinfo, info, is_generic); ts_cache_release(hcache); PG_RETURN_DATUM(retval); @@ -1716,13 +1725,129 @@ TS_FUNCTION_INFO_V1(ts_dimension_add_general); Datum ts_dimension_add(PG_FUNCTION_ARGS) { - return ts_dimension_add_internal(fcinfo, false); + DimensionInfo info = { + .type = PG_ARGISNULL(2) ? DIMENSION_TYPE_OPEN : DIMENSION_TYPE_CLOSED, + .table_relid = PG_GETARG_OID(0), + .num_slices = PG_ARGISNULL(2) ? DatumGetInt32(-1) : PG_GETARG_INT32(2), + .num_slices_is_set = !PG_ARGISNULL(2), + .interval_datum = PG_ARGISNULL(3) ? Int32GetDatum(-1) : PG_GETARG_DATUM(3), + .interval_type = PG_ARGISNULL(3) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 3), + .partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4), + .if_not_exists = PG_ARGISNULL(5) ? 
false : PG_GETARG_BOOL(5), + }; + + TS_PREVENT_FUNC_IF_READ_ONLY(); + + if (!PG_ARGISNULL(1)) + memcpy(&info.colname, PG_GETARG_NAME(1), NAMEDATALEN); + + if (PG_ARGISNULL(0)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("hypertable cannot be NULL"))); + + return ts_dimension_add_internal(fcinfo, &info, false); +} + +TSDLLEXPORT Datum +ts_dimension_info_in(PG_FUNCTION_ARGS) +{ + Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot accept a value of type OID \"%d\"", argtype))); + + PG_RETURN_VOID(); /* keep compiler quiet */ +} + +TSDLLEXPORT Datum +ts_dimension_info_out(PG_FUNCTION_ARGS) +{ + DimensionInfo *info = (DimensionInfo *) PG_GETARG_POINTER(0); + StringInfoData str; + + switch (info->type) + { + case DIMENSION_TYPE_CLOSED: + appendStringInfo(&str, + "hash//%s//%d//%s", + NameStr(info->colname), + info->num_slices, + get_func_name(info->partitioning_func)); + break; + + case DIMENSION_TYPE_OPEN: + { + bool isvarlena; + Oid outfuncid; + + getTypeOutputInfo(info->interval_type, &outfuncid, &isvarlena); + Assert(OidIsValid(outfuncid)); + const char *argvalstr = OidOutputFunctionCall(outfuncid, info->interval_datum); + + appendStringInfo(&str, + "range//%s//%s//%s", + NameStr(info->colname), + argvalstr, + get_func_name(info->partitioning_func)); + break; + } + + case DIMENSION_TYPE_ANY: + appendStringInfo(&str, "any"); + break; + } + PG_RETURN_CSTRING(str.data); +} + +/* + * DimensionInfo for a hash dimension. + * + * This structure is only partially filled in when constructed. The rest will + * be filled in by ts_dimension_add_general. 
 + */ +Datum +ts_hash_dimension(PG_FUNCTION_ARGS) +{ + Name colname = PG_GETARG_NAME(0); + DimensionInfo *info = palloc0(sizeof(DimensionInfo)); + Ensure(PG_NARGS() > 2, "expected 3 arguments, defined with %d", PG_NARGS()); + info->type = DIMENSION_TYPE_CLOSED; + namestrcpy(&info->colname, NameStr(*colname)); + info->num_slices = PG_ARGISNULL(1) ? DatumGetInt32(-1) : PG_GETARG_INT32(1); + info->num_slices_is_set = !PG_ARGISNULL(1); + info->partitioning_func = PG_ARGISNULL(2) ? InvalidOid : PG_GETARG_OID(2); + PG_RETURN_POINTER(info); +} + +/* + * DimensionInfo for a range dimension. + * + * This structure is only partially filled in when constructed. The rest will + * be filled in by ts_dimension_add_general. + */ +Datum +ts_range_dimension(PG_FUNCTION_ARGS) +{ + Name colname = PG_GETARG_NAME(0); + DimensionInfo *info = palloc0(sizeof(DimensionInfo)); + Ensure(PG_NARGS() > 2, "expected 3 arguments, defined with %d", PG_NARGS()); + info->type = DIMENSION_TYPE_OPEN; + namestrcpy(&info->colname, NameStr(*colname)); + info->interval_datum = PG_ARGISNULL(1) ? Int32GetDatum(-1) : PG_GETARG_DATUM(1); + info->interval_type = PG_ARGISNULL(1) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 1); + info->partitioning_func = PG_ARGISNULL(2) ? InvalidOid : PG_GETARG_OID(2); + + PG_RETURN_POINTER(info); +} Datum ts_dimension_add_general(PG_FUNCTION_ARGS) { - return ts_dimension_add_internal(fcinfo, true); + DimensionInfo *info = (DimensionInfo *) PG_GETARG_POINTER(1); + info->table_relid = PG_GETARG_OID(0); + if (PG_GETARG_BOOL(2)) + info->if_not_exists = true; + return ts_dimension_add_internal(fcinfo, info, true); } /* Used as a tuple found function */ diff --git a/src/dimension.h b/src/dimension.h index 7302411d78b..2b567462d94 100644 --- a/src/dimension.h +++ b/src/dimension.h @@ -84,14 +84,21 @@ typedef struct Point typedef struct Hypertable Hypertable; -/* +/** * Dimension information used to validate, create and update dimensions. 
 + * + * This structure is used both partially filled in from the dimension info + * constructors as well as when building dimension info for the storage into + * the dimension table. + * + * @see ts_hash_dimension + * @see ts_range_dimension */ typedef struct DimensionInfo { Oid table_relid; int32 dimension_id; - Name colname; + NameData colname; Oid coltype; DimensionType type; Datum interval_datum; @@ -107,8 +114,8 @@ typedef struct DimensionInfo Hypertable *ht; } DimensionInfo; -#define DIMENSION_INFO_IS_SET(di) \ - (di != NULL && OidIsValid((di)->table_relid) && (di)->colname != NULL) +#define DIMENSION_INFO_IS_SET(di) (di != NULL && OidIsValid((di)->table_relid)) +#define DIMENSION_INFO_IS_VALID(di) ((di)->num_slices_is_set || OidIsValid((di)->interval_type)) extern Hyperspace *ts_dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimension, MemoryContext mctx); diff --git a/src/hypertable.c b/src/hypertable.c index d83fd08bb6f..d38d28b07c4 100644 --- a/src/hypertable.c +++ b/src/hypertable.c @@ -1871,15 +1871,14 @@ TS_FUNCTION_INFO_V1(ts_hypertable_create_general); * process the function arguments before calling this function. 
*/ static Datum -ts_hypertable_create_internal( - FunctionCallInfo fcinfo, Oid table_relid, Name open_dim_name, Name closed_dim_name, - int16 num_partitions, Name associated_schema_name, Name associated_table_prefix, - Datum default_interval, Oid interval_type, bool create_default_indexes, bool if_not_exists, - regproc closed_partitioning_func, bool migrate_data, text *target_size, Oid sizing_func, - regproc open_partitioning_func, bool replication_factor_is_null, int32 replication_factor_in, - ArrayType *data_node_arr, bool distributed_is_null, bool distributed, bool is_generic) -{ - DimensionInfo *closed_dim_info = NULL; +ts_hypertable_create_internal(FunctionCallInfo fcinfo, Oid table_relid, + DimensionInfo *open_dim_info, DimensionInfo *closed_dim_info, + Name associated_schema_name, Name associated_table_prefix, + bool create_default_indexes, bool if_not_exists, bool migrate_data, + text *target_size, Oid sizing_func, bool replication_factor_is_null, + int32 replication_factor_in, ArrayType *data_node_arr, + bool distributed_is_null, bool distributed, bool is_generic) +{ int16 replication_factor; Cache *hcache; Hypertable *ht; @@ -1889,40 +1888,22 @@ ts_hypertable_create_internal( List *data_nodes = NIL; ts_feature_flag_check(FEATURE_HYPERTABLE); - DimensionInfo *open_dim_info = ts_dimension_info_create_open(table_relid, - /* column name */ - open_dim_name, - /* interval */ - default_interval, - /* interval type */ - interval_type, - /* partitioning func */ - open_partitioning_func); ChunkSizingInfo chunk_sizing_info = { .table_relid = table_relid, .target_size = target_size, .func = sizing_func, - .colname = NameStr(*open_dim_name), + .colname = NameStr(open_dim_info->colname), .check_for_index = !create_default_indexes, }; TS_PREVENT_FUNC_IF_READ_ONLY(); - if (!OidIsValid(table_relid)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation cannot be NULL"))); - if (migrate_data && distributed) ereport(ERROR, 
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot migrate data for distributed hypertable"))); - if (NULL == open_dim_name) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("partition column cannot be NULL"))); - if (NULL != data_node_arr && ARR_NDIM(data_node_arr) > 1) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -1963,10 +1944,11 @@ ts_hypertable_create_internal( data_node_arr, &data_nodes); - if (NULL != closed_dim_name) + if (closed_dim_info && !closed_dim_info->num_slices_is_set) { /* If the number of partitions isn't specified, default to setting it * to the number of data nodes */ + int16 num_partitions = closed_dim_info->num_slices; if (num_partitions < 1 && replication_factor > 0) { int num_nodes = list_length(data_nodes); @@ -1974,14 +1956,8 @@ ts_hypertable_create_internal( Assert(num_nodes >= 0); num_partitions = num_nodes & 0xFFFF; } - - closed_dim_info = ts_dimension_info_create_closed(table_relid, - /* column name */ - closed_dim_name, - /* number partitions */ - num_partitions, - /* partitioning func */ - closed_partitioning_func); + closed_dim_info->num_slices = num_partitions; + closed_dim_info->num_slices_is_set = true; } if (if_not_exists) @@ -2062,6 +2038,15 @@ ts_hypertable_create_time_prev(PG_FUNCTION_ARGS, bool is_dist_call) bool distributed_is_null; bool distributed; + if (!OidIsValid(table_relid)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation cannot be NULL"))); + + if (!open_dim_name) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partition column cannot be NULL"))); + /* create_distributed_hypertable() does not have explicit * distributed argument */ if (!is_dist_call) @@ -2075,22 +2060,34 @@ ts_hypertable_create_time_prev(PG_FUNCTION_ARGS, bool is_dist_call) distributed = true; } + DimensionInfo *open_dim_info = + ts_dimension_info_create_open(table_relid, + open_dim_name, /* column name */ + default_interval, /* interval */ + 
interval_type, /* interval type */ + open_partitioning_func /* partitioning func */ + ); + + DimensionInfo *closed_dim_info = + closed_dim_name == NULL ? + NULL : + ts_dimension_info_create_closed(table_relid, + closed_dim_name, /* column name */ + num_partitions, /* number partitions */ + closed_partitioning_func /* partitioning func */ + ); + return ts_hypertable_create_internal(fcinfo, table_relid, - open_dim_name, - closed_dim_name, - num_partitions, + open_dim_info, + closed_dim_info, associated_schema_name, associated_table_prefix, - default_interval, - interval_type, create_default_indexes, if_not_exists, - closed_partitioning_func, migrate_data, target_size, sizing_func, - open_partitioning_func, replication_factor_is_null, replication_factor_in, data_node_arr, @@ -2140,13 +2137,10 @@ Datum ts_hypertable_create_general(PG_FUNCTION_ARGS) { Oid table_relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); - Name open_dim_name = PG_ARGISNULL(1) ? NULL : PG_GETARG_NAME(1); - Datum partitioning_interval = PG_ARGISNULL(2) ? Int64GetDatum(-1) : PG_GETARG_DATUM(2); - Oid interval_type = PG_ARGISNULL(2) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 2); - regproc partitioning_func = PG_ARGISNULL(3) ? InvalidOid : PG_GETARG_OID(3); - bool create_default_indexes = PG_ARGISNULL(4) ? false : PG_GETARG_BOOL(4); - bool if_not_exists = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5); - bool migrate_data = PG_ARGISNULL(6) ? false : PG_GETARG_BOOL(6); + DimensionInfo *dim_info = (DimensionInfo *) PG_GETARG_POINTER(1); + bool create_default_indexes = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2); + bool if_not_exists = PG_ARGISNULL(3) ? false : PG_GETARG_BOOL(3); + bool migrate_data = PG_ARGISNULL(4) ? false : PG_GETARG_BOOL(4); /* * Current implementation requires to provide a valid chunk sizing function @@ -2154,22 +2148,22 @@ ts_hypertable_create_general(PG_FUNCTION_ARGS) */ Oid sizing_func = get_sizing_func_oid(); + /* + * Fill in the rest of the info. 
+ */ + dim_info->table_relid = table_relid; + return ts_hypertable_create_internal(fcinfo, table_relid, - open_dim_name, - NULL, - -1, - NULL, - NULL, - partitioning_interval, - interval_type, + dim_info, + NULL, /* closed_dim_info */ + NULL, /* associated_schema_name */ + NULL, /* associated_table_prefix */ create_default_indexes, if_not_exists, - InvalidOid, migrate_data, NULL, sizing_func, - partitioning_func, true, 0, NULL, diff --git a/tsl/test/expected/hypertable_generalization.out b/tsl/test/expected/hypertable_generalization.out index 22b73262016..e24f9446045 100644 --- a/tsl/test/expected/hypertable_generalization.out +++ b/tsl/test/expected/hypertable_generalization.out @@ -3,7 +3,7 @@ -- LICENSE-TIMESCALE for a copy of the license. -- Validate generalized hypertable for smallint CREATE TABLE test_table_smallint(id SMALLINT, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_smallint', 'id'); +SELECT create_hypertable('test_table_smallint', ByRange('id')); NOTICE: adding not-null constraint to column "id" create_hypertable ------------------- @@ -31,7 +31,7 @@ SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_ -- Validate generalized hypertable for int CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id'); +SELECT create_hypertable('test_table_int', ByRange('id')); NOTICE: adding not-null constraint to column "id" create_hypertable ------------------- @@ -59,7 +59,7 @@ SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_ -- Validate generalized hypertable for bigint CREATE TABLE test_table_bigint(id BIGINT, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_bigint', 'id'); +SELECT create_hypertable('test_table_bigint', ByRange('id')); NOTICE: adding not-null constraint to column "id" create_hypertable 
------------------- @@ -90,7 +90,7 @@ DROP TABLE test_table_int; DROP TABLE test_table_bigint; -- Create hypertable with SERIAL column CREATE TABLE jobs_serial (job_id SERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id)); -SELECT timescaledb_experimental.create_hypertable('jobs_serial', 'job_id', partition_interval => 30); +SELECT create_hypertable('jobs_serial', ByRange('job_id', partition_interval => 30)); create_hypertable ------------------- (4,t) @@ -166,7 +166,7 @@ SELECT count(*) FROM jobs_serial WHERE job_id < 30; DROP TABLE jobs_serial; -- Create and validate hypertable with BIGSERIAL column CREATE TABLE jobs_big_serial (job_id BIGSERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id)); -SELECT timescaledb_experimental.create_hypertable('jobs_big_serial', 'job_id', partition_interval => 100); +SELECT create_hypertable('jobs_big_serial', ByRange('job_id', 100)); create_hypertable ------------------- (5,t) @@ -279,7 +279,7 @@ BEGIN END $BODY$; CREATE TABLE test_table_int(id TEXT, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_func => 'part_func', partition_interval => 10); +SELECT create_hypertable('test_table_int', ByRange('id', 10, partition_func => 'part_func')); NOTICE: adding not-null constraint to column "id" create_hypertable ------------------- @@ -300,7 +300,7 @@ DROP FUNCTION part_func; -- Migrate data CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t; -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, migrate_data => true); +SELECT create_hypertable('test_table_int', ByRange('id', 10), migrate_data => true); NOTICE: adding not-null constraint to column "id" NOTICE: migrating data to chunks create_hypertable @@ -317,7 
+317,7 @@ SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'tes DROP TABLE test_table_int; -- Create default indexes CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, create_default_indexes => false); +SELECT create_hypertable('test_table_int', ByRange('id', 10), create_default_indexes => false); NOTICE: adding not-null constraint to column "id" create_hypertable ------------------- @@ -332,7 +332,7 @@ SELECT indexname FROM pg_indexes WHERE tablename = 'test_table_int'; DROP TABLE test_table_int; -- if_not_exists CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10); +SELECT create_hypertable('test_table_int', ByRange('id', 10)); NOTICE: adding not-null constraint to column "id" create_hypertable ------------------- @@ -340,7 +340,7 @@ NOTICE: adding not-null constraint to column "id" (1 row) -- No error when if_not_exists => true -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, if_not_exists => true); +SELECT create_hypertable('test_table_int', ByRange('id', 10), if_not_exists => true); NOTICE: table "test_table_int" is already a hypertable, skipping create_hypertable ------------------- @@ -355,13 +355,13 @@ SELECT * FROM _timescaledb_functions.get_create_command('test_table_int'); -- Should throw an error when if_not_exists is not set \set ON_ERROR_STOP 0 -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10); +SELECT create_hypertable('test_table_int', ByRange('id', 10)); ERROR: table "test_table_int" is already a hypertable \set ON_ERROR_STOP 1 DROP TABLE test_table_int; -- Add dimension CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT 
timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, migrate_data => true); +SELECT create_hypertable('test_table_int', ByRange('id', 10), migrate_data => true); NOTICE: adding not-null constraint to column "id" create_hypertable ------------------- @@ -369,7 +369,7 @@ NOTICE: adding not-null constraint to column "id" (1 row) INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t; -SELECT timescaledb_experimental.add_dimension('test_table_int', 'device', partition_interval => 2); +SELECT add_dimension('test_table_int', ByRange('device', partition_interval => 2)); NOTICE: adding not-null constraint to column "device" add_dimension --------------- @@ -390,7 +390,7 @@ SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_ (1 row) -- set_partitioning_interval -SELECT timescaledb_experimental.set_partitioning_interval('test_table_int', 5, 'device'); +SELECT set_partitioning_interval('test_table_int', 5, 'device'); set_partitioning_interval --------------------------- @@ -405,7 +405,7 @@ SELECT integer_interval FROM timescaledb_information.dimensions WHERE column_nam DROP TABLE test_table_int; -- Hypertable with time dimension using new API CREATE TABLE test_time(time TIMESTAMP NOT NULL, device INT, temp FLOAT); -SELECT timescaledb_experimental.create_hypertable('test_time', 'time'); +SELECT create_hypertable('test_time', ByRange('time')); WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_hypertable ------------------- @@ -426,7 +426,7 @@ SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_ 2 (1 row) -SELECT timescaledb_experimental.add_dimension('test_time', 'device', partition_interval => 2); +SELECT add_dimension('test_time', ByRange('device', partition_interval => 2)); NOTICE: adding not-null constraint to column "device" add_dimension --------------- @@ -440,7 +440,7 
@@ SELECT hypertable_name, dimension_number, column_name FROM timescaledb_informati test_time | 2 | device (2 rows) -SELECT timescaledb_experimental.set_partitioning_interval('test_time', INTERVAL '1 day', 'time'); +SELECT set_partitioning_interval('test_time', INTERVAL '1 day', 'time'); set_partitioning_interval --------------------------- diff --git a/tsl/test/shared/expected/extension.out b/tsl/test/shared/expected/extension.out index 5f353003676..8933eea92b6 100644 --- a/tsl/test/shared/expected/extension.out +++ b/tsl/test/shared/expected/extension.out @@ -63,6 +63,8 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text _timescaledb_functions.data_node_compressed_chunk_stats(name,name,name) _timescaledb_functions.data_node_hypertable_info(name,name,name) _timescaledb_functions.data_node_index_size(name,name,name) + _timescaledb_functions.dimension_info_in(cstring) + _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info) _timescaledb_functions.drop_chunk(regclass) _timescaledb_functions.drop_dist_ht_invalidation_trigger(integer) _timescaledb_functions.drop_stale_chunks(name,integer[]) @@ -260,6 +262,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text) add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text) add_data_node(name,text,name,integer,boolean,boolean,text) + add_dimension(regclass,_timescaledb_internal.dimension_info,boolean) add_dimension(regclass,name,integer,anyelement,regproc,boolean) add_job(regproc,interval,jsonb,timestamp with time zone,boolean,regproc,boolean,text) add_reorder_policy(regclass,name,boolean,timestamp with time zone,text) @@ -269,12 +272,15 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text approximate_row_count(regclass) attach_data_node(name,regclass,boolean,boolean) 
attach_tablespace(name,regclass,boolean) + byhash(name,integer,regproc) + byrange(name,anyelement,regproc) cagg_migrate(regclass,boolean,boolean) chunk_compression_stats(regclass) chunks_detailed_size(regclass) compress_chunk(regclass,boolean) create_distributed_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc,regproc,integer,name[]) create_distributed_restore_point(text) + create_hypertable(regclass,_timescaledb_internal.dimension_info,boolean,boolean,boolean) create_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc,regproc,integer,name[],boolean) decompress_chunk(regclass,boolean) delete_data_node(name,boolean,boolean,boolean,boolean) @@ -310,6 +316,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text set_chunk_time_interval(regclass,anyelement,name) set_integer_now_func(regclass,regproc,boolean) set_number_partitions(regclass,integer,name) + set_partitioning_interval(regclass,anyelement,name) set_replication_factor(regclass,integer) show_chunks(regclass,"any","any") show_tablespaces(regclass) @@ -340,18 +347,15 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text timescaledb_fdw_validator(text[],oid) timescaledb_post_restore() timescaledb_pre_restore() - timescaledb_experimental.add_dimension(regclass,name,integer,anyelement,regproc,boolean) timescaledb_experimental.add_policies(regclass,boolean,"any","any","any","any") timescaledb_experimental.allow_new_chunks(name,regclass) timescaledb_experimental.alter_policies(regclass,boolean,"any","any","any","any") timescaledb_experimental.block_new_chunks(name,regclass,boolean) timescaledb_experimental.cleanup_copy_chunk_operation(name) timescaledb_experimental.copy_chunk(regclass,name,name,name) - timescaledb_experimental.create_hypertable(regclass,name,anyelement,regproc,boolean,boolean,boolean) 
timescaledb_experimental.move_chunk(regclass,name,name,name) timescaledb_experimental.remove_all_policies(regclass,boolean) timescaledb_experimental.remove_policies(regclass,boolean,text[]) - timescaledb_experimental.set_partitioning_interval(regclass,anyelement,name) timescaledb_experimental.show_policies(regclass) timescaledb_experimental.subscription_exec(text) timescaledb_experimental.time_bucket_ng(interval,date) diff --git a/tsl/test/sql/hypertable_generalization.sql b/tsl/test/sql/hypertable_generalization.sql index a1e9be60a5d..95b893bc5ba 100644 --- a/tsl/test/sql/hypertable_generalization.sql +++ b/tsl/test/sql/hypertable_generalization.sql @@ -4,7 +4,7 @@ -- Validate generalized hypertable for smallint CREATE TABLE test_table_smallint(id SMALLINT, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_smallint', 'id'); +SELECT create_hypertable('test_table_smallint', ByRange('id')); -- default interval SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_smallint'; @@ -20,7 +20,7 @@ SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_ -- Validate generalized hypertable for int CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id'); +SELECT create_hypertable('test_table_int', ByRange('id')); -- Default interval SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int'; @@ -36,7 +36,7 @@ SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_ -- Validate generalized hypertable for bigint CREATE TABLE test_table_bigint(id BIGINT, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_bigint', 'id'); +SELECT create_hypertable('test_table_bigint', ByRange('id')); -- Default interval SELECT integer_interval FROM 
timescaledb_information.dimensions WHERE hypertable_name = 'test_table_bigint'; @@ -56,7 +56,7 @@ DROP TABLE test_table_bigint; -- Create hypertable with SERIAL column CREATE TABLE jobs_serial (job_id SERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id)); -SELECT timescaledb_experimental.create_hypertable('jobs_serial', 'job_id', partition_interval => 30); +SELECT create_hypertable('jobs_serial', ByRange('job_id', partition_interval => 30)); -- Insert data INSERT INTO jobs_serial (device_id, start_time, end_time) @@ -86,7 +86,7 @@ DROP TABLE jobs_serial; -- Create and validate hypertable with BIGSERIAL column CREATE TABLE jobs_big_serial (job_id BIGSERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id)); -SELECT timescaledb_experimental.create_hypertable('jobs_big_serial', 'job_id', partition_interval => 100); +SELECT create_hypertable('jobs_big_serial', ByRange('job_id', 100)); -- Insert data INSERT INTO jobs_big_serial (device_id, start_time, end_time) @@ -143,7 +143,7 @@ END $BODY$; CREATE TABLE test_table_int(id TEXT, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_func => 'part_func', partition_interval => 10); +SELECT create_hypertable('test_table_int', ByRange('id', 10, partition_func => 'part_func')); INSERT INTO test_table_int VALUES('1', 1, '01-01-2023 11:00'::TIMESTAMPTZ); INSERT INTO test_table_int VALUES('10', 10, '01-01-2023 11:00'::TIMESTAMPTZ); @@ -158,7 +158,7 @@ DROP FUNCTION part_func; CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t; -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, migrate_data => true); +SELECT create_hypertable('test_table_int', ByRange('id', 10), migrate_data => true); SELECT count(*) FROM 
timescaledb_information.chunks WHERE hypertable_name = 'test_table_int'; @@ -167,7 +167,7 @@ DROP TABLE test_table_int; -- Create default indexes CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, create_default_indexes => false); +SELECT create_hypertable('test_table_int', ByRange('id', 10), create_default_indexes => false); SELECT indexname FROM pg_indexes WHERE tablename = 'test_table_int'; @@ -176,33 +176,33 @@ DROP TABLE test_table_int; -- if_not_exists CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10); +SELECT create_hypertable('test_table_int', ByRange('id', 10)); -- No error when if_not_exists => true -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, if_not_exists => true); +SELECT create_hypertable('test_table_int', ByRange('id', 10), if_not_exists => true); SELECT * FROM _timescaledb_functions.get_create_command('test_table_int'); -- Should throw an error when if_not_exists is not set \set ON_ERROR_STOP 0 -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10); +SELECT create_hypertable('test_table_int', ByRange('id', 10)); \set ON_ERROR_STOP 1 DROP TABLE test_table_int; -- Add dimension CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); -SELECT timescaledb_experimental.create_hypertable('test_table_int', 'id', partition_interval => 10, migrate_data => true); +SELECT create_hypertable('test_table_int', ByRange('id', 10), migrate_data => true); INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t; -SELECT timescaledb_experimental.add_dimension('test_table_int', 'device', partition_interval => 2); +SELECT add_dimension('test_table_int', 
ByRange('device', partition_interval => 2)); SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int'; SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_table_int'; -- set_partitioning_interval -SELECT timescaledb_experimental.set_partitioning_interval('test_table_int', 5, 'device'); +SELECT set_partitioning_interval('test_table_int', 5, 'device'); SELECT integer_interval FROM timescaledb_information.dimensions WHERE column_name='device'; @@ -210,7 +210,7 @@ DROP TABLE test_table_int; -- Hypertable with time dimension using new API CREATE TABLE test_time(time TIMESTAMP NOT NULL, device INT, temp FLOAT); -SELECT timescaledb_experimental.create_hypertable('test_time', 'time'); +SELECT create_hypertable('test_time', ByRange('time')); -- Default interval SELECT time_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time'; @@ -219,11 +219,11 @@ INSERT INTO test_time SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0. SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_time'; -SELECT timescaledb_experimental.add_dimension('test_time', 'device', partition_interval => 2); +SELECT add_dimension('test_time', ByRange('device', partition_interval => 2)); SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time'; -SELECT timescaledb_experimental.set_partitioning_interval('test_time', INTERVAL '1 day', 'time'); +SELECT set_partitioning_interval('test_time', INTERVAL '1 day', 'time'); SELECT time_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time' AND column_name = 'time';