From e7340e3b3e033bd41327621926141eee03b98faa Mon Sep 17 00:00:00 2001 From: Dipesh Pandit Date: Wed, 7 Jun 2023 20:37:40 +0530 Subject: [PATCH] Simplify hypertable DDL APIs The current hypertable creation interface is heavily focused on a time column, but since hypertables are focused on partitioning of not only time columns, we introduce a more generic API that support different types of keys for partitioning. The new interface introduced new versions of create_hypertable, add_dimension, and a replacement function `set_partitioning_interval` that replaces `set_chunk_time_interval`. The new functions accept an instance of dimension_info that can be constructed using constructor functions `by_range` and `by_hash`, allowing a more versatile and future-proof API. For examples: SELECT create_hypertable('conditions', by_range('time')); SELECT add_dimension('conditions', by_hash('device')); The old API remains, but will eventually be deprecated. --- .unreleased/feature_5761 | 2 + sql/ddl_api.sql | 66 ++- sql/partitioning.sql | 1 - sql/pre_install/types.functions.sql | 11 + sql/pre_install/types.post.sql | 9 + sql/pre_install/types.pre.sql | 3 + sql/updates/latest-dev.sql | 50 ++ sql/updates/reverse-dev.sql | 8 + src/dimension.c | 334 ++++++++--- src/dimension.h | 20 +- src/hypertable.c | 365 +++++++----- src/hypertable.h | 10 + test/expected/ddl_errors.out | 7 +- test/expected/insert_single.out | 10 +- test/expected/rowsecurity-13.out | 66 +-- test/expected/rowsecurity-14.out | 66 +-- test/expected/rowsecurity-15.out | 66 +-- test/expected/timestamp.out | 40 +- test/sql/ddl_errors.sql | 5 +- test/sql/insert_single.sql | 9 +- test/sql/timestamp.sql | 10 +- tsl/test/expected/cagg_errors.out | 2 +- tsl/test/expected/cagg_errors_deprecated.out | 2 +- tsl/test/expected/cagg_joins.out | 2 +- tsl/test/expected/compression_errors.out | 16 +- .../expected/hypertable_generalization.out | 518 ++++++++++++++++++ tsl/test/shared/expected/extension.out | 7 + 
tsl/test/sql/CMakeLists.txt | 1 + tsl/test/sql/hypertable_generalization.sql | 257 +++++++++ 29 files changed, 1628 insertions(+), 335 deletions(-) create mode 100644 .unreleased/feature_5761 create mode 100644 tsl/test/expected/hypertable_generalization.out create mode 100644 tsl/test/sql/hypertable_generalization.sql diff --git a/.unreleased/feature_5761 b/.unreleased/feature_5761 new file mode 100644 index 00000000000..2d5196033f7 --- /dev/null +++ b/.unreleased/feature_5761 @@ -0,0 +1,2 @@ +Implements: #5761 Simplify hypertable DDL API +Thanks: @pdipesh02 for contributing to the implementation of the generalized hypertable API diff --git a/sql/ddl_api.sql b/sql/ddl_api.sql index 36c565c5ca3..09ef76571cb 100644 --- a/sql/ddl_api.sql +++ b/sql/ddl_api.sql @@ -62,6 +62,23 @@ CREATE OR REPLACE FUNCTION @extschema@.create_distributed_hypertable( data_nodes NAME[] = NULL ) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_distributed_create' LANGUAGE C VOLATILE; +-- A generalized hypertable creation API that can be used to convert a PostgreSQL table +-- with TIME/SERIAL/BIGSERIAL columns to a hypertable. +-- +-- relation - The OID of the table to be converted +-- dimension - The dimension to use for partitioning +-- create_default_indexes (Optional) Whether or not to create the default indexes +-- if_not_exists (Optional) Do not fail if table is already a hypertable +-- migrate_data (Optional) Set to true to migrate any existing data in the table to chunks +CREATE OR REPLACE FUNCTION @extschema@.create_hypertable( + relation REGCLASS, + dimension _timescaledb_internal.dimension_info, + create_default_indexes BOOLEAN = TRUE, + if_not_exists BOOLEAN = FALSE, + migrate_data BOOLEAN = FALSE +) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; + + -- Set adaptive chunking. To disable, set chunk_target_size => 'off'. 
CREATE OR REPLACE FUNCTION @extschema@.set_adaptive_chunking( hypertable REGCLASS, @@ -70,7 +87,7 @@ CREATE OR REPLACE FUNCTION @extschema@.set_adaptive_chunking( OUT chunk_target_size BIGINT ) RETURNS RECORD AS '@MODULE_PATHNAME@', 'ts_chunk_adaptive_set' LANGUAGE C VOLATILE; --- Update chunk_time_interval for a hypertable. +-- Update chunk_time_interval for a hypertable [DEPRECATED]. -- -- hypertable - The OID of the table corresponding to a hypertable whose time -- interval should be updated @@ -84,6 +101,20 @@ CREATE OR REPLACE FUNCTION @extschema@.set_chunk_time_interval( dimension_name NAME = NULL ) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; +-- Update partition_interval for a hypertable. +-- +-- hypertable - The OID of the table corresponding to a hypertable whose +-- partition interval should be updated +-- partition_interval - The new interval. For hypertables with integral/serial/bigserial +-- time columns, this must be an integral type. For hypertables with a +-- TIMESTAMP/TIMESTAMPTZ/DATE type, it can be integral which is treated as +-- microseconds, or an INTERVAL type. 
+CREATE OR REPLACE FUNCTION @extschema@.set_partitioning_interval( + hypertable REGCLASS, + partition_interval ANYELEMENT, + dimension_name NAME = NULL +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; + CREATE OR REPLACE FUNCTION @extschema@.set_number_partitions( hypertable REGCLASS, number_partitions INTEGER, @@ -109,12 +140,12 @@ CREATE OR REPLACE FUNCTION @extschema@.show_chunks( ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_show_chunks' LANGUAGE C STABLE PARALLEL SAFE; --- Add a dimension (of partitioning) to a hypertable +-- Add a dimension (of partitioning) to a hypertable [DEPRECATED] -- -- hypertable - OID of the table to add a dimension to -- column_name - NAME of the column to use in partitioning for this dimension -- number_partitions - Number of partitions, for non-time dimensions --- interval_length - Size of intervals for time dimensions (can be integral or INTERVAL) +-- chunk_time_interval - Size of intervals for time dimensions (can be integral or INTERVAL) -- partitioning_func - Function used to partition the column -- if_not_exists - If set, and the dimension already exists, generate a notice instead of an error CREATE OR REPLACE FUNCTION @extschema@.add_dimension( @@ -127,6 +158,29 @@ CREATE OR REPLACE FUNCTION @extschema@.add_dimension( ) RETURNS TABLE(dimension_id INT, schema_name NAME, table_name NAME, column_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_dimension_add' LANGUAGE C VOLATILE; +-- Add a dimension (of partitioning) to a hypertable. 
+-- +-- hypertable - OID of the table to add a dimension to +-- dimension - Dimension to add +-- if_not_exists - If set, and the dimension already exists, generate a notice instead of an error +CREATE OR REPLACE FUNCTION @extschema@.add_dimension( + hypertable REGCLASS, + dimension _timescaledb_internal.dimension_info, + if_not_exists BOOLEAN = FALSE +) RETURNS TABLE(dimension_id INT, created BOOL) +AS '@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE; + +CREATE OR REPLACE FUNCTION @extschema@.by_hash(column_name NAME, number_partitions INTEGER, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info LANGUAGE C + AS '@MODULE_PATHNAME@', 'ts_hash_dimension'; + +CREATE OR REPLACE FUNCTION @extschema@.by_range(column_name NAME, + partition_interval ANYELEMENT = NULL::bigint, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info LANGUAGE C + AS '@MODULE_PATHNAME@', 'ts_range_dimension'; + CREATE OR REPLACE FUNCTION @extschema@.attach_tablespace( tablespace NAME, hypertable REGCLASS, @@ -166,7 +220,7 @@ CREATE OR REPLACE FUNCTION @extschema@.delete_data_node( if_exists BOOLEAN = FALSE, force BOOLEAN = FALSE, repartition BOOLEAN = TRUE, - drop_database BOOLEAN = FALSE + drop_database BOOLEAN = FALSE ) RETURNS BOOLEAN AS '@MODULE_PATHNAME@', 'ts_data_node_delete' LANGUAGE C VOLATILE; -- Attach a data node to a distributed hypertable @@ -185,7 +239,7 @@ CREATE OR REPLACE FUNCTION @extschema@.detach_data_node( if_attached BOOLEAN = FALSE, force BOOLEAN = FALSE, repartition BOOLEAN = TRUE, - drop_remote_data BOOLEAN = FALSE + drop_remote_data BOOLEAN = FALSE ) RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_data_node_detach' LANGUAGE C VOLATILE; @@ -222,6 +276,6 @@ CREATE OR REPLACE FUNCTION @extschema@.alter_data_node( host TEXT = NULL, database NAME = NULL, port INTEGER = NULL, - available BOOLEAN = NULL + available BOOLEAN = NULL ) RETURNS TABLE(node_name NAME, host TEXT, port INTEGER, database NAME, 
available BOOLEAN) AS '@MODULE_PATHNAME@', 'ts_data_node_alter' LANGUAGE C VOLATILE; diff --git a/sql/partitioning.sql b/sql/partitioning.sql index 5637ecbd32e..8a7c6de555c 100644 --- a/sql/partitioning.sql +++ b/sql/partitioning.sql @@ -10,4 +10,3 @@ CREATE OR REPLACE FUNCTION _timescaledb_functions.get_partition_for_key(val anye CREATE OR REPLACE FUNCTION _timescaledb_functions.get_partition_hash(val anyelement) RETURNS int AS '@MODULE_PATHNAME@', 'ts_get_partition_hash' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; - diff --git a/sql/pre_install/types.functions.sql b/sql/pre_install/types.functions.sql index b21d1075c15..f4cd8df41a1 100644 --- a/sql/pre_install/types.functions.sql +++ b/sql/pre_install/types.functions.sql @@ -40,3 +40,14 @@ CREATE OR REPLACE FUNCTION _timescaledb_functions.rxid_in(cstring) RETURNS @exts CREATE OR REPLACE FUNCTION _timescaledb_functions.rxid_out(@extschema@.rxid) RETURNS cstring AS '@MODULE_PATHNAME@', 'ts_remote_txn_id_out' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring) + RETURNS _timescaledb_internal.dimension_info + LANGUAGE C STRICT IMMUTABLE + AS '@MODULE_PATHNAME@', 'ts_dimension_info_in'; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info) + RETURNS cstring + LANGUAGE C STRICT IMMUTABLE + AS '@MODULE_PATHNAME@', 'ts_dimension_info_out'; + diff --git a/sql/pre_install/types.post.sql b/sql/pre_install/types.post.sql index a222c37f776..b6cc568ecfa 100644 --- a/sql/pre_install/types.post.sql +++ b/sql/pre_install/types.post.sql @@ -23,3 +23,12 @@ CREATE TYPE @extschema@.rxid ( input = _timescaledb_functions.rxid_in, output = _timescaledb_functions.rxid_out ); + +-- Dimension type used in create_hypertable, add_dimension, etc. It is +-- deliberately an opaque type. 
+CREATE TYPE _timescaledb_internal.dimension_info ( + INPUT = _timescaledb_functions.dimension_info_in, + OUTPUT = _timescaledb_functions.dimension_info_out, + INTERNALLENGTH = VARIABLE +); + diff --git a/sql/pre_install/types.pre.sql b/sql/pre_install/types.pre.sql index 43033ae6cf8..8d10e3f096c 100644 --- a/sql/pre_install/types.pre.sql +++ b/sql/pre_install/types.pre.sql @@ -13,3 +13,6 @@ CREATE TYPE _timescaledb_internal.compressed_data; CREATE TYPE @extschema@.rxid; --placeholder to allow creation of functions below + +CREATE TYPE _timescaledb_internal.dimension_info; + diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index e69de29bb2d..4e9098ddcf6 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -0,0 +1,50 @@ + +CREATE TYPE _timescaledb_internal.dimension_info; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring) + RETURNS _timescaledb_internal.dimension_info + LANGUAGE C STRICT IMMUTABLE + AS '@MODULE_PATHNAME@', 'ts_dimension_info_in'; + +CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info) + RETURNS cstring + LANGUAGE C STRICT IMMUTABLE + AS '@MODULE_PATHNAME@', 'ts_dimension_info_out'; + +CREATE TYPE _timescaledb_internal.dimension_info ( + INPUT = _timescaledb_functions.dimension_info_in, + OUTPUT = _timescaledb_functions.dimension_info_out, + INTERNALLENGTH = VARIABLE +); + +CREATE FUNCTION @extschema@.create_hypertable( + relation REGCLASS, + dimension _timescaledb_internal.dimension_info, + create_default_indexes BOOLEAN = TRUE, + if_not_exists BOOLEAN = FALSE, + migrate_data BOOLEAN = FALSE +) RETURNS TABLE(hypertable_id INT, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create_general' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.add_dimension( + hypertable REGCLASS, + dimension _timescaledb_internal.dimension_info, + if_not_exists BOOLEAN = FALSE +) RETURNS TABLE(dimension_id INT, created BOOL) +AS 
'@MODULE_PATHNAME@', 'ts_dimension_add_general' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.set_partitioning_interval( + hypertable REGCLASS, + partition_interval ANYELEMENT, + dimension_name NAME = NULL +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_dimension_set_interval' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.by_hash(column_name NAME, number_partitions INTEGER, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info LANGUAGE C + AS '@MODULE_PATHNAME@', 'ts_hash_dimension'; + +CREATE FUNCTION @extschema@.by_range(column_name NAME, + partition_interval ANYELEMENT = NULL::bigint, + partition_func regproc = NULL) + RETURNS _timescaledb_internal.dimension_info LANGUAGE C + AS '@MODULE_PATHNAME@', 'ts_range_dimension'; diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index e69de29bb2d..e0755790787 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -0,0 +1,8 @@ +-- API changes related to hypertable generalization +DROP FUNCTION IF EXISTS @extschema@.add_dimension(regclass,dimension_info,boolean); +DROP FUNCTION IF EXISTS @extschema@.create_hypertable(regclass,dimension_info,boolean,boolean,boolean); +DROP FUNCTION IF EXISTS @extschema@.set_partitioning_interval(regclass,anyelement,name); +DROP FUNCTION IF EXISTS @extschema@.by_hash(name,integer,regproc); +DROP FUNCTION IF EXISTS @extschema@.by_range(name,anyelement,regproc); + +DROP TYPE IF EXISTS _timescaledb_internal.dimension_info CASCADE; diff --git a/src/dimension.c b/src/dimension.c index 8acba09d5c1..5b55898f42d 100644 --- a/src/dimension.c +++ b/src/dimension.c @@ -46,6 +46,18 @@ enum Anum_add_dimension #define Natts_add_dimension (_Anum_add_dimension_max - 1) +/* + * Generic add dimension attributes + */ +enum Anum_generic_add_dimension +{ + Anum_generic_add_dimension_id = 1, + Anum_generic_add_dimension_created, + _Anum_generic_add_dimension_max, +}; + +#define Natts_generic_add_dimension 
(_Anum_generic_add_dimension_max - 1) + static int cmp_dimension_id(const void *left, const void *right) { @@ -61,6 +73,11 @@ cmp_dimension_id(const void *left, const void *right) return 0; } +TS_FUNCTION_INFO_V1(ts_hash_dimension); +TS_FUNCTION_INFO_V1(ts_range_dimension); +PG_FUNCTION_INFO_V1(ts_dimension_info_in); +PG_FUNCTION_INFO_V1(ts_dimension_info_out); + const Dimension * ts_hyperspace_get_dimension_by_id(const Hyperspace *hs, int32 id) { @@ -1032,6 +1049,42 @@ get_validated_integer_interval(Oid dimtype, int64 value) return value; } +/* + * Get the default chunk interval based on dimension type. + */ +static int64 +get_default_interval(Oid dimtype, bool adaptive_chunking) +{ + int64 interval; + + switch (dimtype) + { + case INT2OID: + interval = DEFAULT_SMALLINT_INTERVAL; + break; + case INT4OID: + interval = DEFAULT_INT_INTERVAL; + break; + case INT8OID: + interval = DEFAULT_BIGINT_INTERVAL; + break; + case TIMESTAMPOID: + case TIMESTAMPTZOID: + case DATEOID: + interval = adaptive_chunking ? DEFAULT_CHUNK_TIME_INTERVAL_ADAPTIVE : + DEFAULT_CHUNK_TIME_INTERVAL; + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot get default interval for %s dimension", + format_type_be(dimtype)), + errhint("Use a valid dimension type."))); + } + + return interval; +} + static int64 dimension_interval_to_internal(const char *colname, Oid dimtype, Oid valuetype, Datum value, bool adaptive_chunking) @@ -1046,13 +1099,7 @@ dimension_interval_to_internal(const char *colname, Oid dimtype, Oid valuetype, if (!OidIsValid(valuetype)) { - if (IS_INTEGER_TYPE(dimtype)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("integer dimensions require an explicit interval"))); - - value = Int64GetDatum(adaptive_chunking ? 
DEFAULT_CHUNK_TIME_INTERVAL_ADAPTIVE : - DEFAULT_CHUNK_TIME_INTERVAL); + value = Int64GetDatum(get_default_interval(dimtype, adaptive_chunking)); valuetype = INT8OID; } @@ -1121,7 +1168,7 @@ dimension_add_not_null_on_column(Oid table_relid, char *colname) ereport(NOTICE, (errmsg("adding not-null constraint to column \"%s\"", colname), - errdetail("Time dimensions cannot have NULL values."))); + errdetail("Dimensions cannot have NULL values."))); ts_alter_table_with_event_trigger(table_relid, (Node *) &cmd, list_make1(&cmd), false); } @@ -1289,11 +1336,11 @@ ts_dimension_info_create_open(Oid table_relid, Name column_name, Datum interval, *info = (DimensionInfo){ .type = DIMENSION_TYPE_OPEN, .table_relid = table_relid, - .colname = column_name, .interval_datum = interval, .interval_type = interval_type, .partitioning_func = partitioning_func, }; + namestrcpy(&info->colname, NameStr(*column_name)); return info; } @@ -1305,11 +1352,11 @@ ts_dimension_info_create_closed(Oid table_relid, Name column_name, int32 num_sli *info = (DimensionInfo){ .type = DIMENSION_TYPE_CLOSED, .table_relid = table_relid, - .colname = column_name, .num_slices = num_slices, - .num_slices_is_set = true, + .num_slices_is_set = (num_slices > 0), .partitioning_func = partitioning_func, }; + namestrcpy(&info->colname, NameStr(*column_name)); return info; } @@ -1335,7 +1382,7 @@ dimension_info_validate_open(DimensionInfo *info) dimtype = get_func_rettype(info->partitioning_func); } - info->interval = dimension_interval_to_internal(NameStr(*info->colname), + info->interval = dimension_interval_to_internal(NameStr(info->colname), dimtype, info->interval_type, info->interval_datum, @@ -1362,7 +1409,7 @@ dimension_info_validate_closed(DimensionInfo *info) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid number of partitions for dimension \"%s\"", - NameStr(*info->colname)), + NameStr(info->colname)), errhint("A closed (space) dimension must specify between 1 and %d 
partitions.", PG_INT16_MAX))); } @@ -1386,12 +1433,12 @@ ts_dimension_info_validate(DimensionInfo *info) errmsg("cannot specify both the number of partitions and an interval"))); /* Check that the column exists and get its NOT NULL status */ - tuple = SearchSysCacheAttName(info->table_relid, NameStr(*info->colname)); + tuple = SearchSysCacheAttName(info->table_relid, NameStr(info->colname)); if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" does not exist", NameStr(*info->colname)))); + errmsg("column \"%s\" does not exist", NameStr(info->colname)))); datum = SysCacheGetAttr(ATTNAME, tuple, Anum_pg_attribute_atttypid, &isnull); Assert(!isnull); @@ -1421,21 +1468,21 @@ ts_dimension_info_validate(DimensionInfo *info) /* Check if the dimension already exists */ dim = ts_hyperspace_get_dimension_by_name(info->ht->space, DIMENSION_TYPE_ANY, - NameStr(*info->colname)); + NameStr(info->colname)); if (NULL != dim) { if (!info->if_not_exists) ereport(ERROR, (errcode(ERRCODE_TS_DUPLICATE_DIMENSION), - errmsg("column \"%s\" is already a dimension", NameStr(*info->colname)))); + errmsg("column \"%s\" is already a dimension", NameStr(info->colname)))); info->dimension_id = dim->fd.id; info->skip = true; ereport(NOTICE, (errmsg("column \"%s\" is already a dimension, skipping", - NameStr(*info->colname)))); + NameStr(info->colname)))); return; } } @@ -1458,12 +1505,12 @@ int32 ts_dimension_add_from_info(DimensionInfo *info) { if (info->set_not_null && info->type == DIMENSION_TYPE_OPEN) - dimension_add_not_null_on_column(info->table_relid, NameStr(*info->colname)); + dimension_add_not_null_on_column(info->table_relid, NameStr(info->colname)); Assert(info->ht != NULL); info->dimension_id = dimension_insert(info->ht->fd.id, - info->colname, + &info->colname, info->coltype, info->num_slices, info->partitioning_func, @@ -1476,12 +1523,10 @@ ts_dimension_add_from_info(DimensionInfo *info) * Create a datum to be returned by 
add_dimension DDL function */ static Datum -dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info) +dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_generic) { TupleDesc tupdesc; HeapTuple tuple; - Datum values[Natts_add_dimension]; - bool nulls[Natts_add_dimension] = { false }; if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) ereport(ERROR, @@ -1490,20 +1535,38 @@ dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info) "context that cannot accept type record"))); tupdesc = BlessTupleDesc(tupdesc); - values[AttrNumberGetAttrOffset(Anum_add_dimension_id)] = info->dimension_id; - values[AttrNumberGetAttrOffset(Anum_add_dimension_schema_name)] = - NameGetDatum(&info->ht->fd.schema_name); - values[AttrNumberGetAttrOffset(Anum_add_dimension_table_name)] = - NameGetDatum(&info->ht->fd.table_name); - values[AttrNumberGetAttrOffset(Anum_add_dimension_column_name)] = NameGetDatum(info->colname); - values[AttrNumberGetAttrOffset(Anum_add_dimension_created)] = BoolGetDatum(!info->skip); - tuple = heap_form_tuple(tupdesc, values, nulls); + + if (is_generic) + { + Datum values[Natts_generic_add_dimension]; + bool nulls[Natts_generic_add_dimension] = { false }; + + Assert(tupdesc->natts == Natts_generic_add_dimension); + values[AttrNumberGetAttrOffset(Anum_generic_add_dimension_id)] = info->dimension_id; + values[AttrNumberGetAttrOffset(Anum_generic_add_dimension_created)] = + BoolGetDatum(!info->skip); + tuple = heap_form_tuple(tupdesc, values, nulls); + } + else + { + Datum values[Natts_add_dimension]; + bool nulls[Natts_add_dimension] = { false }; + + Assert(tupdesc->natts == Natts_add_dimension); + values[AttrNumberGetAttrOffset(Anum_add_dimension_id)] = info->dimension_id; + values[AttrNumberGetAttrOffset(Anum_add_dimension_schema_name)] = + NameGetDatum(&info->ht->fd.schema_name); + values[AttrNumberGetAttrOffset(Anum_add_dimension_table_name)] = + NameGetDatum(&info->ht->fd.table_name); + 
values[AttrNumberGetAttrOffset(Anum_add_dimension_column_name)] = + NameGetDatum(&info->colname); + values[AttrNumberGetAttrOffset(Anum_add_dimension_created)] = BoolGetDatum(!info->skip); + tuple = heap_form_tuple(tupdesc, values, nulls); + } return HeapTupleGetDatum(tuple); } -TS_FUNCTION_INFO_V1(ts_dimension_add); - /* * Add a new dimension to a hypertable. * @@ -1515,35 +1578,20 @@ TS_FUNCTION_INFO_V1(ts_dimension_add); * 4. Partitioning function * 5. IF NOT EXISTS option (bool) */ -Datum -ts_dimension_add(PG_FUNCTION_ARGS) +static Datum +ts_dimension_add_internal(FunctionCallInfo fcinfo, DimensionInfo *info, bool is_generic) { Cache *hcache; - DimensionInfo info = { - .type = PG_ARGISNULL(2) ? DIMENSION_TYPE_OPEN : DIMENSION_TYPE_CLOSED, - .table_relid = PG_GETARG_OID(0), - .colname = PG_ARGISNULL(1) ? NULL : PG_GETARG_NAME(1), - .num_slices = PG_ARGISNULL(2) ? DatumGetInt32(-1) : PG_GETARG_INT32(2), - .num_slices_is_set = !PG_ARGISNULL(2), - .interval_datum = PG_ARGISNULL(3) ? Int32GetDatum(-1) : PG_GETARG_DATUM(3), - .interval_type = PG_ARGISNULL(3) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 3), - .partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4), - .if_not_exists = PG_ARGISNULL(5) ? 
false : PG_GETARG_BOOL(5), - }; Datum retval = 0; - TS_PREVENT_FUNC_IF_READ_ONLY(); - - if (PG_ARGISNULL(0)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("hypertable cannot be NULL"))); + Assert(DIMENSION_INFO_IS_SET(info)); - if (!info.num_slices_is_set && !OidIsValid(info.interval_type)) + if (!DIMENSION_INFO_IS_VALID(info)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("must specify either the number of partitions or an interval"))); - ts_hypertable_permissions_check(info.table_relid, GetUserId()); + ts_hypertable_permissions_check(info->table_relid, GetUserId()); /* * The hypertable catalog table has a CHECK(num_dimensions > 0), which @@ -1555,25 +1603,25 @@ ts_dimension_add(PG_FUNCTION_ARGS) * This lock is also used to serialize access from concurrent add_dimension() * call and a chunk creation. */ - LockRelationOid(info.table_relid, ShareUpdateExclusiveLock); + LockRelationOid(info->table_relid, ShareUpdateExclusiveLock); DEBUG_WAITPOINT("add_dimension_ht_lock"); - info.ht = ts_hypertable_cache_get_cache_and_entry(info.table_relid, CACHE_FLAG_NONE, &hcache); + info->ht = ts_hypertable_cache_get_cache_and_entry(info->table_relid, CACHE_FLAG_NONE, &hcache); - if (info.num_slices_is_set && OidIsValid(info.interval_type)) + if (info->num_slices_is_set && OidIsValid(info->interval_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot specify both the number of partitions and an interval"))); - if (!info.num_slices_is_set && !OidIsValid(info.interval_type)) + if (!info->num_slices_is_set && !OidIsValid(info->interval_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot omit both the number of partitions and the interval"))); - ts_dimension_info_validate(&info); + ts_dimension_info_validate(info); - if (!info.skip) + if (!info->skip) { int32 dimension_id; @@ -1582,21 +1630,21 @@ ts_dimension_add(PG_FUNCTION_ARGS) * dimension rows and not the num_dimensions in the 
hypertable catalog * table. */ - ts_hypertable_set_num_dimensions(info.ht, info.ht->space->num_dimensions + 1); - dimension_id = ts_dimension_add_from_info(&info); + ts_hypertable_set_num_dimensions(info->ht, info->ht->space->num_dimensions + 1); + dimension_id = ts_dimension_add_from_info(info); /* If adding the first space dimension, also add dimension partition metadata */ - if (info.type == DIMENSION_TYPE_CLOSED) + if (info->type == DIMENSION_TYPE_CLOSED) { - const Dimension *space_dim = hyperspace_get_closed_dimension(info.ht->space, 0); + const Dimension *space_dim = hyperspace_get_closed_dimension(info->ht->space, 0); if (space_dim != NULL) { - List *data_nodes = ts_hypertable_get_available_data_nodes(info.ht, false); + List *data_nodes = ts_hypertable_get_available_data_nodes(info->ht, false); ts_dimension_partition_info_recreate(dimension_id, - info.num_slices, + info->num_slices, data_nodes, - info.ht->fd.replication_factor); + info->ht->fd.replication_factor); } } @@ -1607,11 +1655,11 @@ ts_dimension_add(PG_FUNCTION_ARGS) * does not reflect the changes in the previous 2 lines which add a * new dimension */ - info.ht = ts_hypertable_get_by_id(info.ht->fd.id); - ts_indexing_verify_indexes(info.ht); + info->ht = ts_hypertable_get_by_id(info->ht->fd.id); + ts_indexing_verify_indexes(info->ht); /* Check that partitioning is sane */ - ts_hypertable_check_partitioning(info.ht, dimension_id); + ts_hypertable_check_partitioning(info->ht, dimension_id); /* * If the hypertable has chunks, to make it compatible @@ -1621,10 +1669,10 @@ ts_dimension_add(PG_FUNCTION_ARGS) * Newly created chunks will have a proper slice range according to * the created dimension and its partitioning. 
*/ - if (ts_hypertable_has_chunks(info.table_relid, AccessShareLock)) + if (ts_hypertable_has_chunks(info->table_relid, AccessShareLock)) { ListCell *lc; - List *chunk_id_list = ts_chunk_get_chunk_ids_by_hypertable_id(info.ht->fd.id); + List *chunk_id_list = ts_chunk_get_chunk_ids_by_hypertable_id(info->ht->fd.id); DimensionSlice *slice; slice = ts_dimension_slice_create(dimension_id, @@ -1646,14 +1694,154 @@ ts_dimension_add(PG_FUNCTION_ARGS) } } - ts_hypertable_func_call_on_data_nodes(info.ht, fcinfo); + ts_hypertable_func_call_on_data_nodes(info->ht, fcinfo); - retval = dimension_create_datum(fcinfo, &info); + retval = dimension_create_datum(fcinfo, info, is_generic); ts_cache_release(hcache); PG_RETURN_DATUM(retval); } +TS_FUNCTION_INFO_V1(ts_dimension_add); +TS_FUNCTION_INFO_V1(ts_dimension_add_general); + +Datum +ts_dimension_add(PG_FUNCTION_ARGS) +{ + DimensionInfo info = { + .type = PG_ARGISNULL(2) ? DIMENSION_TYPE_OPEN : DIMENSION_TYPE_CLOSED, + .table_relid = PG_GETARG_OID(0), + .num_slices = PG_ARGISNULL(2) ? DatumGetInt32(-1) : PG_GETARG_INT32(2), + .num_slices_is_set = !PG_ARGISNULL(2), + .interval_datum = PG_ARGISNULL(3) ? Int32GetDatum(-1) : PG_GETARG_DATUM(3), + .interval_type = PG_ARGISNULL(3) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 3), + .partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4), + .if_not_exists = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5), + }; + + TS_PREVENT_FUNC_IF_READ_ONLY(); + + if (!PG_ARGISNULL(1)) + memcpy(&info.colname, PG_GETARG_NAME(1), NAMEDATALEN); + + if (PG_ARGISNULL(0)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("hypertable cannot be NULL"))); + + return ts_dimension_add_internal(fcinfo, &info, false); +} + +TSDLLEXPORT Datum +ts_dimension_info_in(PG_FUNCTION_ARGS) +{ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot construct type \"dimension_info\" from string"), + errdetail("Type dimension_info cannot be constructed from string. 
You need to " + "use constructor function."), + errhint("Use \"by_range\" or \"by_hash\" to construct dimension types."))); + PG_RETURN_VOID(); /* keep compiler quiet */ +} + +TSDLLEXPORT Datum +ts_dimension_info_out(PG_FUNCTION_ARGS) +{ + DimensionInfo *info = (DimensionInfo *) PG_GETARG_POINTER(0); + StringInfoData str; + const char *partfuncname = + OidIsValid(info->partitioning_func) ? get_func_name(info->partitioning_func) : "-"; + initStringInfo(&str); + switch (info->type) + { + case DIMENSION_TYPE_CLOSED: + appendStringInfo(&str, + "hash//%s//%d//%s", + NameStr(info->colname), + info->num_slices, + partfuncname); + break; + + case DIMENSION_TYPE_OPEN: + { + const char *argvalstr = "-"; + + if (OidIsValid(info->interval_type)) + { + bool isvarlena; + Oid outfuncid; + getTypeOutputInfo(info->interval_type, &outfuncid, &isvarlena); + Assert(OidIsValid(outfuncid)); + argvalstr = OidOutputFunctionCall(outfuncid, info->interval_datum); + } + + appendStringInfo(&str, + "range//%s//%s//%s", + NameStr(info->colname), + argvalstr, + partfuncname); + break; + } + + case DIMENSION_TYPE_ANY: + appendStringInfo(&str, "any"); + break; + } + PG_RETURN_CSTRING(str.data); +} + +static DimensionInfo * +make_dimension_info(Name colname, DimensionType dimtype) +{ + DimensionInfo *info = palloc0(sizeof(DimensionInfo)); + info->type = dimtype; + namestrcpy(&info->colname, NameStr(*colname)); + return info; +} + +/* + * DimensionInfo for a hash dimension. + * + * This structure is only partially filled in when constructed. The rest will + * be filled in by ts_dimension_add_general. + */ +Datum +ts_hash_dimension(PG_FUNCTION_ARGS) +{ + Ensure(PG_NARGS() > 2, "expected at most 3 arguments, invoked with %d arguments", PG_NARGS()); + DimensionInfo *info = make_dimension_info(PG_GETARG_NAME(0), DIMENSION_TYPE_CLOSED); + info->num_slices = PG_ARGISNULL(1) ? DatumGetInt32(-1) : PG_GETARG_INT32(1); + info->num_slices_is_set = !PG_ARGISNULL(1); + info->partitioning_func = PG_ARGISNULL(2) ? 
InvalidOid : PG_GETARG_OID(2); + PG_RETURN_POINTER(info); +} + +/* + * DimensionInfo for a hash dimension. + * + * This structure is only partially filled in when constructed. The rest will + * be filled in by ts_dimension_add_general. + */ +Datum +ts_range_dimension(PG_FUNCTION_ARGS) +{ + Ensure(PG_NARGS() > 2, "expected at most 3 arguments, invoked with %d arguments", PG_NARGS()); + DimensionInfo *info = make_dimension_info(PG_GETARG_NAME(0), DIMENSION_TYPE_OPEN); + info->interval_datum = PG_ARGISNULL(1) ? Int32GetDatum(-1) : PG_GETARG_DATUM(1); + info->interval_type = PG_ARGISNULL(1) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 1); + info->partitioning_func = PG_ARGISNULL(2) ? InvalidOid : PG_GETARG_OID(2); + PG_RETURN_POINTER(info); +} + +Datum +ts_dimension_add_general(PG_FUNCTION_ARGS) +{ + DimensionInfo *info = (DimensionInfo *) PG_GETARG_POINTER(1); + info->table_relid = PG_GETARG_OID(0); + if (PG_GETARG_BOOL(2)) + info->if_not_exists = true; + return ts_dimension_add_internal(fcinfo, info, true); +} + /* Used as a tuple found function */ static ScanTupleResult dimension_rename_schema_name(TupleInfo *ti, void *data) diff --git a/src/dimension.h b/src/dimension.h index 31e2c96b7d3..800f52de2e9 100644 --- a/src/dimension.h +++ b/src/dimension.h @@ -77,16 +77,28 @@ typedef struct Point (USECS_PER_DAY) /* 1 day with adaptive \ * chunking enabled */ +/* Default intervals for integer types */ +#define DEFAULT_SMALLINT_INTERVAL 10000 +#define DEFAULT_INT_INTERVAL 100000 +#define DEFAULT_BIGINT_INTERVAL 1000000 + typedef struct Hypertable Hypertable; /* * Dimension information used to validate, create and update dimensions. + * + * This structure is used both partially filled in from the dimension info + * constructors as well as when building dimension info for the storage into + * the dimension table. 
+ * + * @see ts_hash_dimension + * @see ts_range_dimension */ typedef struct DimensionInfo { Oid table_relid; int32 dimension_id; - Name colname; + NameData colname; Oid coltype; DimensionType type; Datum interval_datum; @@ -102,8 +114,8 @@ typedef struct DimensionInfo Hypertable *ht; } DimensionInfo; -#define DIMENSION_INFO_IS_SET(di) \ - (di != NULL && OidIsValid((di)->table_relid) && (di)->colname != NULL) +#define DIMENSION_INFO_IS_SET(di) (di != NULL && OidIsValid((di)->table_relid)) +#define DIMENSION_INFO_IS_VALID(di) (info->num_slices_is_set || OidIsValid(info->interval_type)) extern Hyperspace *ts_dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimension, MemoryContext mctx); @@ -149,6 +161,8 @@ extern TSDLLEXPORT void ts_dimension_update(const Hypertable *ht, const NameData extern TSDLLEXPORT List *ts_dimension_get_partexprs(const Dimension *dim, Index hyper_varno); extern TSDLLEXPORT Point *ts_point_create(int16 num_dimensions); extern TSDLLEXPORT bool ts_is_equality_operator(Oid opno, Oid left, Oid right); +extern TSDLLEXPORT Datum ts_dimension_info_in(PG_FUNCTION_ARGS); +extern TSDLLEXPORT Datum ts_dimension_info_out(PG_FUNCTION_ARGS); #define hyperspace_get_open_dimension(space, i) \ ts_hyperspace_get_dimension(space, DIMENSION_TYPE_OPEN, i) diff --git a/src/hypertable.c b/src/hypertable.c index 332b4cc79a4..d4296533c59 100644 --- a/src/hypertable.c +++ b/src/hypertable.c @@ -1654,11 +1654,10 @@ ts_hypertable_insert_blocker_trigger_add(PG_FUNCTION_ARGS) } static Datum -create_hypertable_datum(FunctionCallInfo fcinfo, const Hypertable *ht, bool created) +create_hypertable_datum(FunctionCallInfo fcinfo, const Hypertable *ht, bool created, + bool is_generic) { TupleDesc tupdesc; - Datum values[Natts_create_hypertable]; - bool nulls[Natts_create_hypertable] = { false }; HeapTuple tuple; if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) @@ -1668,13 +1667,31 @@ create_hypertable_datum(FunctionCallInfo fcinfo, const 
Hypertable *ht, bool crea "context that cannot accept type record"))); tupdesc = BlessTupleDesc(tupdesc); - values[AttrNumberGetAttrOffset(Anum_create_hypertable_id)] = Int32GetDatum(ht->fd.id); - values[AttrNumberGetAttrOffset(Anum_create_hypertable_schema_name)] = - NameGetDatum(&ht->fd.schema_name); - values[AttrNumberGetAttrOffset(Anum_create_hypertable_table_name)] = - NameGetDatum(&ht->fd.table_name); - values[AttrNumberGetAttrOffset(Anum_create_hypertable_created)] = BoolGetDatum(created); - tuple = heap_form_tuple(tupdesc, values, nulls); + + if (is_generic) + { + Datum values[Natts_generic_create_hypertable]; + bool nulls[Natts_generic_create_hypertable] = { false }; + + values[AttrNumberGetAttrOffset(Anum_generic_create_hypertable_id)] = + Int32GetDatum(ht->fd.id); + values[AttrNumberGetAttrOffset(Anum_generic_create_hypertable_created)] = + BoolGetDatum(created); + tuple = heap_form_tuple(tupdesc, values, nulls); + } + else + { + Datum values[Natts_create_hypertable]; + bool nulls[Natts_create_hypertable] = { false }; + + values[AttrNumberGetAttrOffset(Anum_create_hypertable_id)] = Int32GetDatum(ht->fd.id); + values[AttrNumberGetAttrOffset(Anum_create_hypertable_schema_name)] = + NameGetDatum(&ht->fd.schema_name); + values[AttrNumberGetAttrOffset(Anum_create_hypertable_table_name)] = + NameGetDatum(&ht->fd.table_name); + values[AttrNumberGetAttrOffset(Anum_create_hypertable_created)] = BoolGetDatum(created); + tuple = heap_form_tuple(tupdesc, values, nulls); + } return HeapTupleGetDatum(tuple); } @@ -1847,80 +1864,22 @@ hypertable_validate_create_call(const char *hypertable_name, bool distributed, TS_FUNCTION_INFO_V1(ts_hypertable_create); TS_FUNCTION_INFO_V1(ts_hypertable_distributed_create); +TS_FUNCTION_INFO_V1(ts_hypertable_create_general); /* - * Create a hypertable from an existing table. 
- * - * Arguments: - * relation REGCLASS - * time_column_name NAME - * partitioning_column NAME = NULL - * number_partitions INTEGER = NULL - * associated_schema_name NAME = NULL - * associated_table_prefix NAME = NULL - * chunk_time_interval anyelement = NULL::BIGINT - * create_default_indexes BOOLEAN = TRUE - * if_not_exists BOOLEAN = FALSE - * partitioning_func REGPROC = NULL - * migrate_data BOOLEAN = FALSE - * chunk_target_size TEXT = NULL - * chunk_sizing_func OID = NULL - * time_partitioning_func REGPROC = NULL - * replication_factor INTEGER = NULL - * data nodes NAME[] = NULL - * distributed BOOLEAN = NULL (not present for dist call) + * Create a hypertable from an existing table. The specific version of create hypertable API + * process the function arguments before calling this function. */ static Datum -ts_hypertable_create_internal(PG_FUNCTION_ARGS, bool is_dist_call) +ts_hypertable_create_internal(FunctionCallInfo fcinfo, Oid table_relid, + DimensionInfo *open_dim_info, DimensionInfo *closed_dim_info, + Name associated_schema_name, Name associated_table_prefix, + bool create_default_indexes, bool if_not_exists, bool migrate_data, + text *target_size, Oid sizing_func, bool replication_factor_is_null, + int32 replication_factor_in, ArrayType *data_node_arr, + bool distributed_is_null, bool distributed, bool is_generic) { - Oid table_relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); - Name time_dim_name = PG_ARGISNULL(1) ? NULL : PG_GETARG_NAME(1); - Name space_dim_name = PG_ARGISNULL(2) ? NULL : PG_GETARG_NAME(2); - Name associated_schema_name = PG_ARGISNULL(4) ? NULL : PG_GETARG_NAME(4); - Name associated_table_prefix = PG_ARGISNULL(5) ? NULL : PG_GETARG_NAME(5); - bool create_default_indexes = - PG_ARGISNULL(7) ? false : PG_GETARG_BOOL(7); /* Defaults to true in the sql code */ - bool if_not_exists = PG_ARGISNULL(8) ? false : PG_GETARG_BOOL(8); - bool migrate_data = PG_ARGISNULL(10) ? 
false : PG_GETARG_BOOL(10); - DimensionInfo *time_dim_info = - ts_dimension_info_create_open(table_relid, - /* column name */ - time_dim_name, - /* interval */ - PG_ARGISNULL(6) ? Int64GetDatum(-1) : PG_GETARG_DATUM(6), - /* interval type */ - PG_ARGISNULL(6) ? InvalidOid : - get_fn_expr_argtype(fcinfo->flinfo, 6), - /* partitioning func */ - PG_ARGISNULL(13) ? InvalidOid : PG_GETARG_OID(13)); - DimensionInfo *space_dim_info = NULL; - bool replication_factor_is_null = PG_ARGISNULL(14); - int32 replication_factor_in = replication_factor_is_null ? 0 : PG_GETARG_INT32(14); int16 replication_factor; - ArrayType *data_node_arr = PG_ARGISNULL(15) ? NULL : PG_GETARG_ARRAYTYPE_P(15); - ChunkSizingInfo chunk_sizing_info = { - .table_relid = table_relid, - .target_size = PG_ARGISNULL(11) ? NULL : PG_GETARG_TEXT_P(11), - .func = PG_ARGISNULL(12) ? InvalidOid : PG_GETARG_OID(12), - .colname = PG_ARGISNULL(1) ? NULL : PG_GETARG_CSTRING(1), - .check_for_index = !create_default_indexes, - }; - bool distributed_is_null; - bool distributed; - - /* create_distributed_hypertable() does not have explicit - * distributed argument */ - if (!is_dist_call) - { - distributed_is_null = PG_ARGISNULL(16); - distributed = distributed_is_null ? 
false : PG_GETARG_BOOL(16); - } - else - { - distributed_is_null = false; - distributed = true; - } - Cache *hcache; Hypertable *ht; Datum retval; @@ -1930,21 +1889,21 @@ ts_hypertable_create_internal(PG_FUNCTION_ARGS, bool is_dist_call) ts_feature_flag_check(FEATURE_HYPERTABLE); - TS_PREVENT_FUNC_IF_READ_ONLY(); + ChunkSizingInfo chunk_sizing_info = { + .table_relid = table_relid, + .target_size = target_size, + .func = sizing_func, + .colname = NameStr(open_dim_info->colname), + .check_for_index = !create_default_indexes, + }; - if (!OidIsValid(table_relid)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation cannot be NULL"))); + TS_PREVENT_FUNC_IF_READ_ONLY(); - if (migrate_data && is_dist_call) + if (migrate_data && distributed) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot migrate data for distributed hypertable"))); - if (NULL == time_dim_name) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("time column cannot be NULL"))); - if (NULL != data_node_arr && ARR_NDIM(data_node_arr) > 1) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -1985,12 +1944,11 @@ ts_hypertable_create_internal(PG_FUNCTION_ARGS, bool is_dist_call) data_node_arr, &data_nodes); - if (NULL != space_dim_name) + if (closed_dim_info && !closed_dim_info->num_slices_is_set) { - int16 num_partitions = PG_ARGISNULL(3) ? 
-1 : PG_GETARG_INT16(3); - /* If the number of partitions isn't specified, default to setting it * to the number of data nodes */ + int16 num_partitions = closed_dim_info->num_slices; if (num_partitions < 1 && replication_factor > 0) { int num_nodes = list_length(data_nodes); @@ -1998,15 +1956,8 @@ ts_hypertable_create_internal(PG_FUNCTION_ARGS, bool is_dist_call) Assert(num_nodes >= 0); num_partitions = num_nodes & 0xFFFF; } - - space_dim_info = - ts_dimension_info_create_closed(table_relid, - /* column name */ - space_dim_name, - /* number partitions */ - num_partitions, - /* partitioning func */ - PG_ARGISNULL(9) ? InvalidOid : PG_GETARG_OID(9)); + closed_dim_info->num_slices = num_partitions; + closed_dim_info->num_slices_is_set = true; } if (if_not_exists) @@ -2019,8 +1970,8 @@ ts_hypertable_create_internal(PG_FUNCTION_ARGS, bool is_dist_call) created = ts_hypertable_create_from_info(table_relid, INVALID_HYPERTABLE_ID, flags, - time_dim_info, - space_dim_info, + open_dim_info, + closed_dim_info, associated_schema_name, associated_table_prefix, &chunk_sizing_info, @@ -2029,26 +1980,194 @@ ts_hypertable_create_internal(PG_FUNCTION_ARGS, bool is_dist_call) Assert(created); ht = ts_hypertable_cache_get_cache_and_entry(table_relid, CACHE_FLAG_NONE, &hcache); - if (NULL != space_dim_info) - ts_hypertable_check_partitioning(ht, space_dim_info->dimension_id); + if (NULL != closed_dim_info) + ts_hypertable_check_partitioning(ht, closed_dim_info->dimension_id); } - retval = create_hypertable_datum(fcinfo, ht, created); + retval = create_hypertable_datum(fcinfo, ht, created, is_generic); ts_cache_release(hcache); PG_RETURN_DATUM(retval); } +/* + * Process create_hypertable parameters for time specific implementation. 
+ * + * Arguments: + * relation REGCLASS + * time_column_name NAME + * partitioning_column NAME = NULL + * number_partitions INTEGER = NULL + * associated_schema_name NAME = NULL + * associated_table_prefix NAME = NULL + * chunk_time_interval anyelement = NULL::BIGINT + * create_default_indexes BOOLEAN = TRUE + * if_not_exists BOOLEAN = FALSE + * partitioning_func REGPROC = NULL + * migrate_data BOOLEAN = FALSE + * chunk_target_size TEXT = NULL + * chunk_sizing_func OID = NULL + * time_partitioning_func REGPROC = NULL + * replication_factor INTEGER = NULL + * data nodes NAME[] = NULL + * distributed BOOLEAN = NULL (not present for dist call) + */ +static Datum +ts_hypertable_create_time_prev(PG_FUNCTION_ARGS, bool is_dist_call) +{ + Oid table_relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); + Name open_dim_name = PG_ARGISNULL(1) ? NULL : PG_GETARG_NAME(1); + Name closed_dim_name = PG_ARGISNULL(2) ? NULL : PG_GETARG_NAME(2); + int16 num_partitions = PG_ARGISNULL(3) ? -1 : PG_GETARG_INT16(3); + Name associated_schema_name = PG_ARGISNULL(4) ? NULL : PG_GETARG_NAME(4); + Name associated_table_prefix = PG_ARGISNULL(5) ? NULL : PG_GETARG_NAME(5); + Datum default_interval = PG_ARGISNULL(6) ? Int64GetDatum(-1) : PG_GETARG_DATUM(6); + Oid interval_type = PG_ARGISNULL(6) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 6); + bool create_default_indexes = + PG_ARGISNULL(7) ? false : PG_GETARG_BOOL(7); /* Defaults to true in the sql code */ + bool if_not_exists = PG_ARGISNULL(8) ? false : PG_GETARG_BOOL(8); + regproc closed_partitioning_func = PG_ARGISNULL(9) ? InvalidOid : PG_GETARG_OID(9); + bool migrate_data = PG_ARGISNULL(10) ? false : PG_GETARG_BOOL(10); + text *target_size = PG_ARGISNULL(11) ? NULL : PG_GETARG_TEXT_P(11); + Oid sizing_func = PG_ARGISNULL(12) ? InvalidOid : PG_GETARG_OID(12); + regproc open_partitioning_func = PG_ARGISNULL(13) ? 
InvalidOid : PG_GETARG_OID(13); + bool replication_factor_is_null = PG_ARGISNULL(14); + int32 replication_factor_in = replication_factor_is_null ? 0 : PG_GETARG_INT32(14); + ArrayType *data_node_arr = PG_ARGISNULL(15) ? NULL : PG_GETARG_ARRAYTYPE_P(15); + + bool distributed_is_null; + bool distributed; + + if (!OidIsValid(table_relid)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("relation cannot be NULL"))); + + if (!open_dim_name) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partition column cannot be NULL"))); + + /* create_distributed_hypertable() does not have explicit + * distributed argument */ + if (!is_dist_call) + { + distributed_is_null = PG_ARGISNULL(16); + distributed = distributed_is_null ? false : PG_GETARG_BOOL(16); + } + else + { + distributed_is_null = false; + distributed = true; + } + + DimensionInfo *open_dim_info = + ts_dimension_info_create_open(table_relid, + open_dim_name, /* column name */ + default_interval, /* interval */ + interval_type, /* interval type */ + open_partitioning_func /* partitioning func */ + ); + + DimensionInfo *closed_dim_info = NULL; + if (closed_dim_name) + closed_dim_info = + ts_dimension_info_create_closed(table_relid, + closed_dim_name, /* column name */ + num_partitions, /* number partitions */ + closed_partitioning_func /* partitioning func */ + ); + + return ts_hypertable_create_internal(fcinfo, + table_relid, + open_dim_info, + closed_dim_info, + associated_schema_name, + associated_table_prefix, + create_default_indexes, + if_not_exists, + migrate_data, + target_size, + sizing_func, + replication_factor_is_null, + replication_factor_in, + data_node_arr, + distributed_is_null, + distributed, + false); +} + Datum ts_hypertable_create(PG_FUNCTION_ARGS) { - return ts_hypertable_create_internal(fcinfo, false); + return ts_hypertable_create_time_prev(fcinfo, false); } Datum ts_hypertable_distributed_create(PG_FUNCTION_ARGS) { - return 
ts_hypertable_create_internal(fcinfo, true); + return ts_hypertable_create_time_prev(fcinfo, true); +} + +static Oid +get_sizing_func_oid() +{ + const char *sizing_func_name = "calculate_chunk_interval"; + const int sizing_func_nargs = 3; + static Oid sizing_func_arg_types[] = { INT4OID, INT8OID, INT8OID }; + + return ts_get_function_oid(sizing_func_name, + INTERNAL_SCHEMA_NAME, + sizing_func_nargs, + sizing_func_arg_types); +} + +/* + * Process create_hypertable parameters for generic implementation. + * + * Arguments: + * relation REGCLASS + * dimension dimension_info + * create_default_indexes BOOLEAN = TRUE + * if_not_exists BOOLEAN = FALSE + * migrate_data BOOLEAN = FALSE + */ +Datum +ts_hypertable_create_general(PG_FUNCTION_ARGS) +{ + Oid table_relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); + DimensionInfo *dim_info = (DimensionInfo *) PG_GETARG_POINTER(1); + bool create_default_indexes = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2); + bool if_not_exists = PG_ARGISNULL(3) ? false : PG_GETARG_BOOL(3); + bool migrate_data = PG_ARGISNULL(4) ? false : PG_GETARG_BOOL(4); + + /* + * Current implementation requires to provide a valid chunk sizing function + * that is being used to populate hypertable catalog information. + */ + Oid sizing_func = get_sizing_func_oid(); + + /* + * Fill in the rest of the info. + */ + dim_info->table_relid = table_relid; + + return ts_hypertable_create_internal(fcinfo, + table_relid, + dim_info, + NULL, /* closed_dim_info */ + NULL, /* associated_schema_name */ + NULL, /* associated_table_prefix */ + create_default_indexes, + if_not_exists, + migrate_data, + NULL, + sizing_func, + true, + 0, + NULL, + true, + false, + true); } /* Go through columns of parent table and check for column data types. 
*/ @@ -2098,7 +2217,7 @@ ts_validate_basetable_columns(Relation *rel) */ bool ts_hypertable_create_from_info(Oid table_relid, int32 hypertable_id, uint32 flags, - DimensionInfo *time_dim_info, DimensionInfo *space_dim_info, + DimensionInfo *time_dim_info, DimensionInfo *closed_dim_info, Name associated_schema_name, Name associated_table_prefix, ChunkSizingInfo *chunk_sizing_info, int16 replication_factor, List *data_node_names) @@ -2275,8 +2394,8 @@ ts_hypertable_create_from_info(Oid table_relid, int32 hypertable_id, uint32 flag /* Validate that the dimensions are OK */ ts_dimension_info_validate(time_dim_info); - if (DIMENSION_INFO_IS_SET(space_dim_info)) - ts_dimension_info_validate(space_dim_info); + if (DIMENSION_INFO_IS_SET(closed_dim_info)) + ts_dimension_info_validate(closed_dim_info); /* Checks pass, now we can create the catalog information */ namestrcpy(&schema_name, get_namespace_name(get_rel_namespace(table_relid))); @@ -2290,7 +2409,7 @@ ts_hypertable_create_from_info(Oid table_relid, int32 hypertable_id, uint32 flag &chunk_sizing_info->func_schema, &chunk_sizing_info->func_name, chunk_sizing_info->target_size_bytes, - DIMENSION_INFO_IS_SET(space_dim_info) ? 2 : 1, + DIMENSION_INFO_IS_SET(closed_dim_info) ? 
2 : 1, false, replication_factor); @@ -2301,12 +2420,12 @@ ts_hypertable_create_from_info(Oid table_relid, int32 hypertable_id, uint32 flag /* Add validated dimensions */ ts_dimension_add_from_info(time_dim_info); - if (DIMENSION_INFO_IS_SET(space_dim_info)) + if (DIMENSION_INFO_IS_SET(closed_dim_info)) { - space_dim_info->ht = time_dim_info->ht; - ts_dimension_add_from_info(space_dim_info); - ts_dimension_partition_info_recreate(space_dim_info->dimension_id, - space_dim_info->num_slices, + closed_dim_info->ht = time_dim_info->ht; + ts_dimension_add_from_info(closed_dim_info); + ts_dimension_partition_info_recreate(closed_dim_info->dimension_id, + closed_dim_info->num_slices, data_node_names, replication_factor); } @@ -2745,14 +2864,14 @@ List * ts_hypertable_assign_chunk_data_nodes(const Hypertable *ht, const Hypercube *cube) { List *chunk_data_nodes = NIL; - const Dimension *space_dim = hyperspace_get_closed_dimension(ht->space, 0); + const Dimension *closed_dim = hyperspace_get_closed_dimension(ht->space, 0); - if (NULL != space_dim && NULL != space_dim->dimension_partitions) + if (NULL != closed_dim && NULL != closed_dim->dimension_partitions) { const DimensionSlice *slice = - ts_hypercube_get_slice_by_dimension_id(cube, space_dim->fd.id); + ts_hypercube_get_slice_by_dimension_id(cube, closed_dim->fd.id); const DimensionPartition *dp = - ts_dimension_partition_find(space_dim->dimension_partitions, slice->fd.range_start); + ts_dimension_partition_find(closed_dim->dimension_partitions, slice->fd.range_start); ListCell *lc; /* Filter out data nodes that aren't available */ @@ -3002,17 +3121,17 @@ ts_hypertable_has_compression_table(const Hypertable *ht) bool ts_hypertable_update_dimension_partitions(const Hypertable *ht) { - const Dimension *space_dim = hyperspace_get_closed_dimension(ht->space, 0); + const Dimension *closed_dim = hyperspace_get_closed_dimension(ht->space, 0); - if (NULL != space_dim) + if (NULL != closed_dim) { List *data_node_names = NIL; if 
(hypertable_is_distributed(ht)) data_node_names = ts_hypertable_get_available_data_node_names(ht, false); - ts_dimension_partition_info_recreate(space_dim->fd.id, - space_dim->fd.num_slices, + ts_dimension_partition_info_recreate(closed_dim->fd.id, + closed_dim->fd.num_slices, data_node_names, ht->fd.replication_factor); return true; diff --git a/src/hypertable.h b/src/hypertable.h index 8e2fa27bf60..c2a8a35b2e6 100644 --- a/src/hypertable.h +++ b/src/hypertable.h @@ -73,6 +73,16 @@ enum Anum_create_hypertable #define Natts_create_hypertable (_Anum_create_hypertable_max - 1) +/* Create a generic hypertable */ +enum Anum_generic_create_hypertable +{ + Anum_generic_create_hypertable_id = 1, + Anum_generic_create_hypertable_created, + _Anum_generic_create_hypertable_max, +}; + +#define Natts_generic_create_hypertable (_Anum_generic_create_hypertable_max - 1) + extern TSDLLEXPORT Oid ts_rel_get_owner(Oid relid); extern List *ts_hypertable_get_all(void); diff --git a/test/expected/ddl_errors.out b/test/expected/ddl_errors.out index f35a2ab3abb..a8f997550e8 100644 --- a/test/expected/ddl_errors.out +++ b/test/expected/ddl_errors.out @@ -7,14 +7,13 @@ CREATE TABLE PUBLIC."Hypertable_1" ( temp_c int NOT NULL DEFAULT -1 ); CREATE INDEX ON PUBLIC."Hypertable_1" (time, "Device_id"); +-- Default integer interval is supported as part of +-- hypertable generalization, verify additional secnarios \set ON_ERROR_STOP 0 SELECT * FROM create_hypertable(NULL, NULL); ERROR: relation cannot be NULL SELECT * FROM create_hypertable('"public"."Hypertable_1"', NULL); -ERROR: time column cannot be NULL --- integer time dimensions require an explicit interval -SELECT * FROM create_hypertable('"public"."Hypertable_1"', 'time'); -ERROR: integer dimensions require an explicit interval +ERROR: partition column cannot be NULL -- space dimensions require explicit number of partitions SELECT * FROM create_hypertable('"public"."Hypertable_1"', 'time', 'Device_id', 
chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); ERROR: invalid number of partitions for dimension "Device_id" diff --git a/test/expected/insert_single.out b/test/expected/insert_single.out index 2f5c7cabcc5..90a25ef5dad 100644 --- a/test/expected/insert_single.out +++ b/test/expected/insert_single.out @@ -387,13 +387,12 @@ SELECT * FROM "3dim" ORDER BY (time, device); Fri Jan 20 09:00:47 2017 | 25.1 | yellow | la (3 rows) --- Test that large intervals and no interval fail for INTEGER +-- Test large intervals as default interval for integer is +-- supported as part of hypertable generalization \set ON_ERROR_STOP 0 CREATE TABLE "inttime_err"(time INTEGER PRIMARY KEY, temp float); SELECT create_hypertable('"inttime_err"', 'time', chunk_time_interval=>2147483648); ERROR: invalid interval: must be between 1 and 2147483647 -SELECT create_hypertable('"inttime_err"', 'time'); -ERROR: integer dimensions require an explicit interval \set ON_ERROR_STOP 1 SELECT create_hypertable('"inttime_err"', 'time', chunk_time_interval=>2147483647); create_hypertable @@ -401,13 +400,12 @@ SELECT create_hypertable('"inttime_err"', 'time', chunk_time_interval=>214748364 (8,public,inttime_err,t) (1 row) --- Test that large intervals and no interval fail for SMALLINT +-- Test large intervals as default interval is supported +-- for integer types as part of hypertable generalization. 
\set ON_ERROR_STOP 0 CREATE TABLE "smallinttime_err"(time SMALLINT PRIMARY KEY, temp float); SELECT create_hypertable('"smallinttime_err"', 'time', chunk_time_interval=>32768); ERROR: invalid interval: must be between 1 and 32767 -SELECT create_hypertable('"smallinttime_err"', 'time'); -ERROR: integer dimensions require an explicit interval \set ON_ERROR_STOP 1 SELECT create_hypertable('"smallinttime_err"', 'time', chunk_time_interval=>32767); create_hypertable diff --git a/test/expected/rowsecurity-13.out b/test/expected/rowsecurity-13.out index 96ade000b50..fb239b6a208 100644 --- a/test/expected/rowsecurity-13.out +++ b/test/expected/rowsecurity-13.out @@ -996,7 +996,7 @@ CREATE TABLE hyper_document ( GRANT ALL ON hyper_document TO public; SELECT public.create_hypertable('hyper_document', 'did', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "did" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (2,regress_rls_schema,hyper_document,t) @@ -1489,7 +1489,7 @@ SET row_security TO ON; CREATE TABLE dependee (x integer, y integer); SELECT public.create_hypertable('dependee', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------- (3,regress_rls_schema,dependee,t) @@ -1498,7 +1498,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE dependent (x integer, y integer); SELECT public.create_hypertable('dependent', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------------ (4,regress_rls_schema,dependent,t) @@ -1527,7 +1527,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE rec1 (x integer, y integer); SELECT public.create_hypertable('rec1', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------- (5,regress_rls_schema,rec1,t) @@ -1586,7 +1586,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE s1 (a int, b text); SELECT public.create_hypertable('s1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (6,regress_rls_schema,s1,t) @@ -1596,7 +1596,7 @@ INSERT INTO s1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x); CREATE TABLE s2 (x int, y text); SELECT public.create_hypertable('s2', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (7,regress_rls_schema,s2,t) @@ -2221,7 +2221,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE b1 (a int, b text); SELECT public.create_hypertable('b1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (8,regress_rls_schema,b1,t) @@ -2480,7 +2480,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE z1 (a int, b text); SELECT public.create_hypertable('z1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ----------------------------- (9,regress_rls_schema,z1,t) @@ -2489,7 +2489,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE z2 (a int, b text); SELECT public.create_hypertable('z2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (10,regress_rls_schema,z2,t) @@ -3190,7 +3190,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE x1 (a int, b text, c text); SELECT public.create_hypertable('x1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (11,regress_rls_schema,x1,t) @@ -3306,7 +3306,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE y1 (a int, b text); SELECT public.create_hypertable('y1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (12,regress_rls_schema,y1,t) @@ -3316,7 +3316,7 @@ INSERT INTO y1 VALUES(1,2); CREATE TABLE y2 (a int, b text); SELECT public.create_hypertable('y2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (13,regress_rls_schema,y2,t) @@ -3625,7 +3625,7 @@ NOTICE: drop cascades to 2 other objects CREATE TABLE t1 (a integer); SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------ (14,regress_rls_schema,t1,t) @@ -3674,7 +3674,7 @@ DROP TABLE t1 CASCADE; CREATE TABLE t1 (a integer, b text); SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (15,regress_rls_schema,t1,t) @@ -3803,7 +3803,7 @@ SET SESSION AUTHORIZATION regress_rls_bob; CREATE TABLE t2 (a integer, b text); SELECT public.create_hypertable('t2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (16,regress_rls_schema,t2,t) @@ -3923,7 +3923,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE blog (id integer, author text, post text); SELECT public.create_hypertable('blog', 'id', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "id" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (17,regress_rls_schema,blog,t) @@ -3932,7 +3932,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE comment (blog_id integer, message text); SELECT public.create_hypertable('comment', 'blog_id', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "blog_id" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------- (18,regress_rls_schema,comment,t) @@ -4128,7 +4128,7 @@ ERROR: table "copy_t" does not exist CREATE TABLE copy_t (a integer, b text); SELECT public.create_hypertable('copy_t', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. 
+DETAIL: Dimensions cannot have NULL values. create_hypertable ---------------------------------- (19,regress_rls_schema,copy_t,t) @@ -4221,7 +4221,7 @@ SET row_security TO ON; CREATE TABLE copy_rel_to (a integer, b text); SELECT public.create_hypertable('copy_rel_to', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable --------------------------------------- (20,regress_rls_schema,copy_rel_to,t) @@ -4297,7 +4297,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE current_check (currentid int, payload text, rlsuser text); SELECT public.create_hypertable('current_check', 'currentid', chunk_time_interval=>10); NOTICE: adding not-null constraint to column "currentid" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (21,regress_rls_schema,current_check,t) @@ -4536,7 +4536,7 @@ BEGIN; CREATE TABLE t (c int); SELECT public.create_hypertable('t', 'c', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (22,regress_rls_schema,t,t) @@ -4575,7 +4575,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (23,regress_rls_schema,r1,t) @@ -4584,7 +4584,7 @@ DETAIL: Time dimensions cannot have NULL values. 
CREATE TABLE r2 (a int); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (24,regress_rls_schema,r2,t) @@ -4667,7 +4667,7 @@ SET row_security = on; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (25,regress_rls_schema,r1,t) @@ -4722,7 +4722,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (26,regress_rls_schema,r2,t) @@ -4766,7 +4766,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1 ON DELETE CASCADE); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (27,regress_rls_schema,r2,t) @@ -4799,7 +4799,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1 ON UPDATE CASCADE); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------ (28,regress_rls_schema,r2,t) @@ -4840,7 +4840,7 @@ SET row_security = on; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (29,regress_rls_schema,r1,t) @@ -4929,7 +4929,7 @@ RESET SESSION AUTHORIZATION; CREATE TABLE dep1 (c1 int); SELECT public.create_hypertable('dep1', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (31,regress_rls_schema,dep1,t) @@ -4938,7 +4938,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE dep2 (c1 int); SELECT public.create_hypertable('dep2', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (32,regress_rls_schema,dep2,t) @@ -4990,7 +4990,7 @@ CREATE ROLE regress_rls_dob_role2; CREATE TABLE dob_t1 (c1 int); SELECT public.create_hypertable('dob_t1', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ---------------------------------- (33,regress_rls_schema,dob_t1,t) @@ -5030,7 +5030,7 @@ CREATE SCHEMA regress_rls_schema; CREATE TABLE rls_tbl (c1 int); SELECT public.create_hypertable('rls_tbl', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ----------------------------------- (34,regress_rls_schema,rls_tbl,t) @@ -5044,7 +5044,7 @@ CREATE POLICY p4 ON rls_tbl FOR DELETE USING (c1 <= 3); CREATE TABLE rls_tbl_force (c1 int); SELECT public.create_hypertable('rls_tbl_force', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (35,regress_rls_schema,rls_tbl_force,t) diff --git a/test/expected/rowsecurity-14.out b/test/expected/rowsecurity-14.out index 19f7d0301dc..3c95b64c662 100644 --- a/test/expected/rowsecurity-14.out +++ b/test/expected/rowsecurity-14.out @@ -996,7 +996,7 @@ CREATE TABLE hyper_document ( GRANT ALL ON hyper_document TO public; SELECT public.create_hypertable('hyper_document', 'did', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "did" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (2,regress_rls_schema,hyper_document,t) @@ -1489,7 +1489,7 @@ SET row_security TO ON; CREATE TABLE dependee (x integer, y integer); SELECT public.create_hypertable('dependee', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------- (3,regress_rls_schema,dependee,t) @@ -1498,7 +1498,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE dependent (x integer, y integer); SELECT public.create_hypertable('dependent', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------------ (4,regress_rls_schema,dependent,t) @@ -1527,7 +1527,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE rec1 (x integer, y integer); SELECT public.create_hypertable('rec1', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------- (5,regress_rls_schema,rec1,t) @@ -1586,7 +1586,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE s1 (a int, b text); SELECT public.create_hypertable('s1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (6,regress_rls_schema,s1,t) @@ -1596,7 +1596,7 @@ INSERT INTO s1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x); CREATE TABLE s2 (x int, y text); SELECT public.create_hypertable('s2', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (7,regress_rls_schema,s2,t) @@ -2202,7 +2202,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE b1 (a int, b text); SELECT public.create_hypertable('b1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (8,regress_rls_schema,b1,t) @@ -2461,7 +2461,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE z1 (a int, b text); SELECT public.create_hypertable('z1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ----------------------------- (9,regress_rls_schema,z1,t) @@ -2470,7 +2470,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE z2 (a int, b text); SELECT public.create_hypertable('z2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (10,regress_rls_schema,z2,t) @@ -3171,7 +3171,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE x1 (a int, b text, c text); SELECT public.create_hypertable('x1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (11,regress_rls_schema,x1,t) @@ -3287,7 +3287,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE y1 (a int, b text); SELECT public.create_hypertable('y1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (12,regress_rls_schema,y1,t) @@ -3297,7 +3297,7 @@ INSERT INTO y1 VALUES(1,2); CREATE TABLE y2 (a int, b text); SELECT public.create_hypertable('y2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (13,regress_rls_schema,y2,t) @@ -3606,7 +3606,7 @@ NOTICE: drop cascades to 2 other objects CREATE TABLE t1 (a integer); SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------ (14,regress_rls_schema,t1,t) @@ -3655,7 +3655,7 @@ DROP TABLE t1 CASCADE; CREATE TABLE t1 (a integer, b text); SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (15,regress_rls_schema,t1,t) @@ -3784,7 +3784,7 @@ SET SESSION AUTHORIZATION regress_rls_bob; CREATE TABLE t2 (a integer, b text); SELECT public.create_hypertable('t2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (16,regress_rls_schema,t2,t) @@ -3904,7 +3904,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE blog (id integer, author text, post text); SELECT public.create_hypertable('blog', 'id', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "id" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (17,regress_rls_schema,blog,t) @@ -3913,7 +3913,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE comment (blog_id integer, message text); SELECT public.create_hypertable('comment', 'blog_id', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "blog_id" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------- (18,regress_rls_schema,comment,t) @@ -4109,7 +4109,7 @@ ERROR: table "copy_t" does not exist CREATE TABLE copy_t (a integer, b text); SELECT public.create_hypertable('copy_t', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. 
+DETAIL: Dimensions cannot have NULL values. create_hypertable ---------------------------------- (19,regress_rls_schema,copy_t,t) @@ -4202,7 +4202,7 @@ SET row_security TO ON; CREATE TABLE copy_rel_to (a integer, b text); SELECT public.create_hypertable('copy_rel_to', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable --------------------------------------- (20,regress_rls_schema,copy_rel_to,t) @@ -4278,7 +4278,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE current_check (currentid int, payload text, rlsuser text); SELECT public.create_hypertable('current_check', 'currentid', chunk_time_interval=>10); NOTICE: adding not-null constraint to column "currentid" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (21,regress_rls_schema,current_check,t) @@ -4514,7 +4514,7 @@ BEGIN; CREATE TABLE t (c int); SELECT public.create_hypertable('t', 'c', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (22,regress_rls_schema,t,t) @@ -4553,7 +4553,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (23,regress_rls_schema,r1,t) @@ -4562,7 +4562,7 @@ DETAIL: Time dimensions cannot have NULL values. 
CREATE TABLE r2 (a int); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (24,regress_rls_schema,r2,t) @@ -4644,7 +4644,7 @@ SET row_security = on; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (25,regress_rls_schema,r1,t) @@ -4699,7 +4699,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (26,regress_rls_schema,r2,t) @@ -4743,7 +4743,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1 ON DELETE CASCADE); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (27,regress_rls_schema,r2,t) @@ -4776,7 +4776,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1 ON UPDATE CASCADE); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------ (28,regress_rls_schema,r2,t) @@ -4817,7 +4817,7 @@ SET row_security = on; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (29,regress_rls_schema,r1,t) @@ -4906,7 +4906,7 @@ RESET SESSION AUTHORIZATION; CREATE TABLE dep1 (c1 int); SELECT public.create_hypertable('dep1', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (31,regress_rls_schema,dep1,t) @@ -4915,7 +4915,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE dep2 (c1 int); SELECT public.create_hypertable('dep2', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (32,regress_rls_schema,dep2,t) @@ -4967,7 +4967,7 @@ CREATE ROLE regress_rls_dob_role2; CREATE TABLE dob_t1 (c1 int); SELECT public.create_hypertable('dob_t1', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ---------------------------------- (33,regress_rls_schema,dob_t1,t) @@ -5007,7 +5007,7 @@ CREATE SCHEMA regress_rls_schema; CREATE TABLE rls_tbl (c1 int); SELECT public.create_hypertable('rls_tbl', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ----------------------------------- (34,regress_rls_schema,rls_tbl,t) @@ -5021,7 +5021,7 @@ CREATE POLICY p4 ON rls_tbl FOR DELETE USING (c1 <= 3); CREATE TABLE rls_tbl_force (c1 int); SELECT public.create_hypertable('rls_tbl_force', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (35,regress_rls_schema,rls_tbl_force,t) diff --git a/test/expected/rowsecurity-15.out b/test/expected/rowsecurity-15.out index 00ced637fbb..e2ce06ccb2a 100644 --- a/test/expected/rowsecurity-15.out +++ b/test/expected/rowsecurity-15.out @@ -996,7 +996,7 @@ CREATE TABLE hyper_document ( GRANT ALL ON hyper_document TO public; SELECT public.create_hypertable('hyper_document', 'did', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "did" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (2,regress_rls_schema,hyper_document,t) @@ -1489,7 +1489,7 @@ SET row_security TO ON; CREATE TABLE dependee (x integer, y integer); SELECT public.create_hypertable('dependee', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------- (3,regress_rls_schema,dependee,t) @@ -1498,7 +1498,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE dependent (x integer, y integer); SELECT public.create_hypertable('dependent', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------------ (4,regress_rls_schema,dependent,t) @@ -1527,7 +1527,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE rec1 (x integer, y integer); SELECT public.create_hypertable('rec1', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------- (5,regress_rls_schema,rec1,t) @@ -1586,7 +1586,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE s1 (a int, b text); SELECT public.create_hypertable('s1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (6,regress_rls_schema,s1,t) @@ -1596,7 +1596,7 @@ INSERT INTO s1 (SELECT x, md5(x::text) FROM generate_series(-10,10) x); CREATE TABLE s2 (x int, y text); SELECT public.create_hypertable('s2', 'x', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "x" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (7,regress_rls_schema,s2,t) @@ -2203,7 +2203,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE b1 (a int, b text); SELECT public.create_hypertable('b1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (8,regress_rls_schema,b1,t) @@ -2463,7 +2463,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE z1 (a int, b text); SELECT public.create_hypertable('z1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ----------------------------- (9,regress_rls_schema,z1,t) @@ -2472,7 +2472,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE z2 (a int, b text); SELECT public.create_hypertable('z2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (10,regress_rls_schema,z2,t) @@ -3173,7 +3173,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE x1 (a int, b text, c text); SELECT public.create_hypertable('x1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (11,regress_rls_schema,x1,t) @@ -3289,7 +3289,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE y1 (a int, b text); SELECT public.create_hypertable('y1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (12,regress_rls_schema,y1,t) @@ -3299,7 +3299,7 @@ INSERT INTO y1 VALUES(1,2); CREATE TABLE y2 (a int, b text); SELECT public.create_hypertable('y2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (13,regress_rls_schema,y2,t) @@ -3608,7 +3608,7 @@ NOTICE: drop cascades to 2 other objects CREATE TABLE t1 (a integer); SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------ (14,regress_rls_schema,t1,t) @@ -3657,7 +3657,7 @@ DROP TABLE t1 CASCADE; CREATE TABLE t1 (a integer, b text); SELECT public.create_hypertable('t1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (15,regress_rls_schema,t1,t) @@ -3786,7 +3786,7 @@ SET SESSION AUTHORIZATION regress_rls_bob; CREATE TABLE t2 (a integer, b text); SELECT public.create_hypertable('t2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (16,regress_rls_schema,t2,t) @@ -3906,7 +3906,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE blog (id integer, author text, post text); SELECT public.create_hypertable('blog', 'id', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "id" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (17,regress_rls_schema,blog,t) @@ -3915,7 +3915,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE comment (blog_id integer, message text); SELECT public.create_hypertable('comment', 'blog_id', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "blog_id" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------- (18,regress_rls_schema,comment,t) @@ -4111,7 +4111,7 @@ ERROR: table "copy_t" does not exist CREATE TABLE copy_t (a integer, b text); SELECT public.create_hypertable('copy_t', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. 
+DETAIL: Dimensions cannot have NULL values. create_hypertable ---------------------------------- (19,regress_rls_schema,copy_t,t) @@ -4204,7 +4204,7 @@ SET row_security TO ON; CREATE TABLE copy_rel_to (a integer, b text); SELECT public.create_hypertable('copy_rel_to', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable --------------------------------------- (20,regress_rls_schema,copy_rel_to,t) @@ -4280,7 +4280,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE current_check (currentid int, payload text, rlsuser text); SELECT public.create_hypertable('current_check', 'currentid', chunk_time_interval=>10); NOTICE: adding not-null constraint to column "currentid" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (21,regress_rls_schema,current_check,t) @@ -4516,7 +4516,7 @@ BEGIN; CREATE TABLE t (c int); SELECT public.create_hypertable('t', 'c', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------- (22,regress_rls_schema,t,t) @@ -4555,7 +4555,7 @@ SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (23,regress_rls_schema,r1,t) @@ -4564,7 +4564,7 @@ DETAIL: Time dimensions cannot have NULL values. 
CREATE TABLE r2 (a int); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (24,regress_rls_schema,r2,t) @@ -4646,7 +4646,7 @@ SET row_security = on; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (25,regress_rls_schema,r1,t) @@ -4701,7 +4701,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (26,regress_rls_schema,r2,t) @@ -4745,7 +4745,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1 ON DELETE CASCADE); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (27,regress_rls_schema,r2,t) @@ -4778,7 +4778,7 @@ CREATE TABLE r1 (a int PRIMARY KEY); CREATE TABLE r2 (a int REFERENCES r1 ON UPDATE CASCADE); SELECT public.create_hypertable('r2', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ------------------------------ (28,regress_rls_schema,r2,t) @@ -4819,7 +4819,7 @@ SET row_security = on; CREATE TABLE r1 (a int); SELECT public.create_hypertable('r1', 'a', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (29,regress_rls_schema,r1,t) @@ -4908,7 +4908,7 @@ RESET SESSION AUTHORIZATION; CREATE TABLE dep1 (c1 int); SELECT public.create_hypertable('dep1', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (31,regress_rls_schema,dep1,t) @@ -4917,7 +4917,7 @@ DETAIL: Time dimensions cannot have NULL values. CREATE TABLE dep2 (c1 int); SELECT public.create_hypertable('dep2', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable -------------------------------- (32,regress_rls_schema,dep2,t) @@ -4969,7 +4969,7 @@ CREATE ROLE regress_rls_dob_role2; CREATE TABLE dob_t1 (c1 int); SELECT public.create_hypertable('dob_t1', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ---------------------------------- (33,regress_rls_schema,dob_t1,t) @@ -5009,7 +5009,7 @@ CREATE SCHEMA regress_rls_schema; CREATE TABLE rls_tbl (c1 int); SELECT public.create_hypertable('rls_tbl', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. 
create_hypertable ----------------------------------- (34,regress_rls_schema,rls_tbl,t) @@ -5023,7 +5023,7 @@ CREATE POLICY p4 ON rls_tbl FOR DELETE USING (c1 <= 3); CREATE TABLE rls_tbl_force (c1 int); SELECT public.create_hypertable('rls_tbl_force', 'c1', chunk_time_interval=>2); NOTICE: adding not-null constraint to column "c1" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ----------------------------------------- (35,regress_rls_schema,rls_tbl_force,t) diff --git a/test/expected/timestamp.out b/test/expected/timestamp.out index 7bae605a09f..91ad38f2d88 100644 --- a/test/expected/timestamp.out +++ b/test/expected/timestamp.out @@ -2006,10 +2006,46 @@ SELECT test.interval_to_internal('BIGINT'::regtype, 2147483649::bigint); 2147483649 (1 row) +-- Default interval for integer is supported as part of +-- hypertable generalization +SELECT test.interval_to_internal('INT'::regtype); + interval_to_internal +---------------------- + 100000 +(1 row) + +SELECT test.interval_to_internal('SMALLINT'::regtype); + interval_to_internal +---------------------- + 10000 +(1 row) + +SELECT test.interval_to_internal('BIGINT'::regtype); + interval_to_internal +---------------------- + 1000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMPTZ'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('DATE'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + \set VERBOSITY terse \set ON_ERROR_STOP 0 -SELECT test.interval_to_internal('INT'::regtype); -ERROR: integer dimensions require an explicit interval SELECT test.interval_to_internal('INT'::regtype, 2147483649::bigint); ERROR: invalid interval: must be between 1 and 2147483647 SELECT test.interval_to_internal('SMALLINT'::regtype, 
32768::bigint); diff --git a/test/sql/ddl_errors.sql b/test/sql/ddl_errors.sql index 92d5a8b1cf2..5fb7ecd4a4a 100644 --- a/test/sql/ddl_errors.sql +++ b/test/sql/ddl_errors.sql @@ -9,11 +9,12 @@ CREATE TABLE PUBLIC."Hypertable_1" ( ); CREATE INDEX ON PUBLIC."Hypertable_1" (time, "Device_id"); +-- Default integer interval is supported as part of +-- hypertable generalization, verify additional scenarios + \set ON_ERROR_STOP 0 SELECT * FROM create_hypertable(NULL, NULL); SELECT * FROM create_hypertable('"public"."Hypertable_1"', NULL); --- integer time dimensions require an explicit interval -SELECT * FROM create_hypertable('"public"."Hypertable_1"', 'time'); -- space dimensions require explicit number of partitions SELECT * FROM create_hypertable('"public"."Hypertable_1"', 'time', 'Device_id', chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); SELECT * FROM create_hypertable('"public"."Hypertable_1_mispelled"', 'time', 'Device_id', 2, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); diff --git a/test/sql/insert_single.sql b/test/sql/insert_single.sql index d652f65ad96..46394e03d5c 100644 --- a/test/sql/insert_single.sql +++ b/test/sql/insert_single.sql @@ -99,19 +99,20 @@ EXECUTE "1dim_plan_generic"; SELECT * FROM "1dim" ORDER BY time; SELECT * FROM "3dim" ORDER BY (time, device); --- Test that large intervals and no interval fail for INTEGER +-- Test large intervals as default interval for integer is +-- supported as part of hypertable generalization \set ON_ERROR_STOP 0 CREATE TABLE "inttime_err"(time INTEGER PRIMARY KEY, temp float); SELECT create_hypertable('"inttime_err"', 'time', chunk_time_interval=>2147483648); -SELECT create_hypertable('"inttime_err"', 'time'); \set ON_ERROR_STOP 1 SELECT create_hypertable('"inttime_err"', 'time', chunk_time_interval=>2147483647); --- Test that large intervals and no interval fail for SMALLINT +-- Test large intervals as default interval is supported +-- for integer types as 
part of hypertable generalization. + \set ON_ERROR_STOP 0 CREATE TABLE "smallinttime_err"(time SMALLINT PRIMARY KEY, temp float); SELECT create_hypertable('"smallinttime_err"', 'time', chunk_time_interval=>32768); -SELECT create_hypertable('"smallinttime_err"', 'time'); \set ON_ERROR_STOP 1 SELECT create_hypertable('"smallinttime_err"', 'time', chunk_time_interval=>32767); diff --git a/test/sql/timestamp.sql b/test/sql/timestamp.sql index 4217a40a09f..ca47231193f 100644 --- a/test/sql/timestamp.sql +++ b/test/sql/timestamp.sql @@ -897,9 +897,17 @@ SELECT test.interval_to_internal('TIMESTAMP'::regtype, 86400); SELECT test.interval_to_internal('TIMESTAMP'::regtype); SELECT test.interval_to_internal('BIGINT'::regtype, 2147483649::bigint); +-- Default interval for integer is supported as part of +-- hypertable generalization +SELECT test.interval_to_internal('INT'::regtype); +SELECT test.interval_to_internal('SMALLINT'::regtype); +SELECT test.interval_to_internal('BIGINT'::regtype); +SELECT test.interval_to_internal('TIMESTAMPTZ'::regtype); +SELECT test.interval_to_internal('TIMESTAMP'::regtype); +SELECT test.interval_to_internal('DATE'::regtype); + \set VERBOSITY terse \set ON_ERROR_STOP 0 -SELECT test.interval_to_internal('INT'::regtype); SELECT test.interval_to_internal('INT'::regtype, 2147483649::bigint); SELECT test.interval_to_internal('SMALLINT'::regtype, 32768::bigint); SELECT test.interval_to_internal('TEXT'::regtype, 32768::bigint); diff --git a/tsl/test/expected/cagg_errors.out b/tsl/test/expected/cagg_errors.out index ad8fd145f88..9db8fb6e1a6 100644 --- a/tsl/test/expected/cagg_errors.out +++ b/tsl/test/expected/cagg_errors.out @@ -260,7 +260,7 @@ HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. create table rowsec_tab( a bigint, b integer, c integer); select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. 
+DETAIL: Dimensions cannot have NULL values. table_name ------------ rowsec_tab diff --git a/tsl/test/expected/cagg_errors_deprecated.out b/tsl/test/expected/cagg_errors_deprecated.out index 324bbf04ca6..29131835628 100644 --- a/tsl/test/expected/cagg_errors_deprecated.out +++ b/tsl/test/expected/cagg_errors_deprecated.out @@ -326,7 +326,7 @@ HINT: Use CREATE MATERIALIZED VIEW to create a continuous aggregate. create table rowsec_tab( a bigint, b integer, c integer); select table_name from create_hypertable( 'rowsec_tab', 'a', chunk_time_interval=>10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. table_name ------------ rowsec_tab diff --git a/tsl/test/expected/cagg_joins.out b/tsl/test/expected/cagg_joins.out index 9a15ba3635c..c142d8d101f 100644 --- a/tsl/test/expected/cagg_joins.out +++ b/tsl/test/expected/cagg_joins.out @@ -39,7 +39,7 @@ SELECT create_hypertable( migrate_data => true ); NOTICE: adding not-null constraint to column "day" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. NOTICE: migrating data to chunks DETAIL: Migration might take a while depending on the amount of data. create_hypertable diff --git a/tsl/test/expected/compression_errors.out b/tsl/test/expected/compression_errors.out index daef6218ff3..c4fc5899ebd 100644 --- a/tsl/test/expected/compression_errors.out +++ b/tsl/test/expected/compression_errors.out @@ -8,7 +8,7 @@ create table foo2 (a integer, "bacB toD" integer, c integer, d integer); select table_name from create_hypertable('foo2', 'a', chunk_time_interval=> 10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. table_name ------------ foo2 @@ -17,7 +17,7 @@ DETAIL: Time dimensions cannot have NULL values. 
create table foo3 (a integer, "bacB toD" integer, c integer, d integer); select table_name from create_hypertable('foo3', 'a', chunk_time_interval=> 10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. table_name ------------ foo3 @@ -26,7 +26,7 @@ DETAIL: Time dimensions cannot have NULL values. create table non_compressed (a integer, "bacB toD" integer, c integer, d integer); select table_name from create_hypertable('non_compressed', 'a', chunk_time_interval=> 10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. table_name ---------------- non_compressed @@ -55,7 +55,7 @@ create table with_rls (a integer, b integer); ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; select table_name from create_hypertable('with_rls', 'a', chunk_time_interval=> 10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. table_name ------------ with_rls @@ -93,7 +93,7 @@ DETAIL: The timescaledb.compress_orderby option was previously set and must als create table reserved_column_prefix (a integer, _ts_meta_foo integer, "bacB toD" integer, c integer, d integer); select table_name from create_hypertable('reserved_column_prefix', 'a', chunk_time_interval=> 10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. table_name ------------------------ reserved_column_prefix @@ -106,7 +106,7 @@ create table foo (a integer, b integer, c integer, t text, p point); ALTER TABLE foo ADD CONSTRAINT chk_existing CHECK(b > 0); select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10); NOTICE: adding not-null constraint to column "a" -DETAIL: Time dimensions cannot have NULL values. 
+DETAIL: Dimensions cannot have NULL values. table_name ------------ foo @@ -452,7 +452,7 @@ ALTER TABLE table_constr2 SET (timescaledb.compress=false); CREATE TABLE test_table_int(time bigint, val int); SELECT create_hypertable('test_table_int', 'time', chunk_time_interval => 1); NOTICE: adding not-null constraint to column "time" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. create_hypertable ------------------------------ (27,public,test_table_int,t) @@ -623,7 +623,7 @@ CREATE TABLE data_table AS SELECT now() AS tm, 125 AS c1, 125 AS c2, 125 AS c3, CREATE TABLE ts_table (LIKE data_table); SELECT * FROM create_hypertable('ts_table', 'tm'); NOTICE: adding not-null constraint to column "tm" -DETAIL: Time dimensions cannot have NULL values. +DETAIL: Dimensions cannot have NULL values. hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 33 | public | ts_table | t diff --git a/tsl/test/expected/hypertable_generalization.out b/tsl/test/expected/hypertable_generalization.out new file mode 100644 index 00000000000..d297fcc38d2 --- /dev/null +++ b/tsl/test/expected/hypertable_generalization.out @@ -0,0 +1,518 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE OR REPLACE FUNCTION part_func(id TEXT) + RETURNS INTEGER LANGUAGE PLPGSQL IMMUTABLE AS +$BODY$ +DECLARE + retval INTEGER; +BEGIN + retval := CAST(id AS INTEGER); + RETURN retval; +END +$BODY$; +SELECT by_range('id'); + by_range +----------------- + range//id//-//- +(1 row) + +SELECT by_range('id', partition_func => 'part_func'); + by_range +------------------------- + range//id//-//part_func +(1 row) + +SELECT by_range('id', '1 week'::interval); + by_range +------------------------ + range//id//@ 7 days//- +(1 row) + +SELECT by_range('id', '1 week'::interval, 'part_func'::regproc); + by_range +-------------------------------- + range//id//@ 7 days//part_func +(1 row) + +SELECT by_hash('id', 3); + by_hash +---------------- + hash//id//3//- +(1 row) + +SELECT by_hash('id', 3, partition_func => 'part_func'); + by_hash +------------------------ + hash//id//3//part_func +(1 row) + +\set ON_ERROR_STOP 0 +SELECT 'hash//id//3//-'::_timescaledb_internal.dimension_info; +ERROR: cannot construct type "dimension_info" from string at character 8 +\set ON_ERROR_STOP 1 +-- Validate generalized hypertable for smallint +CREATE TABLE test_table_smallint(id SMALLINT, device INTEGER, time TIMESTAMPTZ); +SELECT create_hypertable('test_table_smallint', by_range('id')); +NOTICE: adding not-null constraint to column "id" + create_hypertable +------------------- + (1,t) +(1 row) + +-- default interval +SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_smallint'; + integer_interval +------------------ + 10000 +(1 row) + +-- Add data with default partition (10000) +INSERT INTO test_table_smallint VALUES (1, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_smallint VALUES (9999, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_smallint VALUES (10000, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_smallint VALUES (20000, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +-- Number of chunks +SELECT count(*) 
FROM timescaledb_information.chunks WHERE hypertable_name='test_table_smallint'; + count +------- + 3 +(1 row) + +-- Validate generalized hypertable for int +CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); +SELECT create_hypertable('test_table_int', by_range('id')); +NOTICE: adding not-null constraint to column "id" + create_hypertable +------------------- + (2,t) +(1 row) + +-- Default interval +SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int'; + integer_interval +------------------ + 100000 +(1 row) + +-- Add data +INSERT INTO test_table_int VALUES (1, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_int VALUES (99999, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_int VALUES (100000, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_int VALUES (200000, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +-- Number of chunks +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_table_int'; + count +------- + 3 +(1 row) + +-- Validate generalized hypertable for bigint +CREATE TABLE test_table_bigint(id BIGINT, device INTEGER, time TIMESTAMPTZ); +SELECT create_hypertable('test_table_bigint', by_range('id')); +NOTICE: adding not-null constraint to column "id" + create_hypertable +------------------- + (3,t) +(1 row) + +-- Default interval +SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_bigint'; + integer_interval +------------------ + 1000000 +(1 row) + +-- Add data +INSERT INTO test_table_bigint VALUES (1, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_bigint VALUES (999999, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_bigint VALUES (1000000, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_bigint VALUES (2000000, 10, '01-01-2023 11:00'::TIMESTAMPTZ); +-- Number of chunks +SELECT count(*) FROM timescaledb_information.chunks WHERE 
hypertable_name='test_table_bigint'; + count +------- + 3 +(1 row) + +DROP TABLE test_table_smallint; +DROP TABLE test_table_int; +DROP TABLE test_table_bigint; +-- Create hypertable with SERIAL column +CREATE TABLE jobs_serial (job_id SERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id)); +SELECT create_hypertable('jobs_serial', by_range('job_id', partition_interval => 30)); + create_hypertable +------------------- + (4,t) +(1 row) + +-- Insert data +INSERT INTO jobs_serial (device_id, start_time, end_time) +SELECT abs(timestamp_hash(t::timestamp)) % 10, t, t + INTERVAL '1 day' +FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00':: TIMESTAMPTZ,'1 hour')t; +-- Verify chunk pruning +EXPLAIN VERBOSE SELECT * FROM jobs_serial WHERE job_id < 30; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Index Scan using "10_1_jobs_serial_pkey" on _timescaledb_internal._hyper_4_10_chunk (cost=0.15..20.30 rows=523 width=24) + Output: _hyper_4_10_chunk.job_id, _hyper_4_10_chunk.device_id, _hyper_4_10_chunk.start_time, _hyper_4_10_chunk.end_time + Index Cond: (_hyper_4_10_chunk.job_id < 30) +(3 rows) + +EXPLAIN VERBOSE SELECT * FROM jobs_serial WHERE job_id >= 30 AND job_id < 90; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Append (cost=0.15..14.71 rows=16 width=24) + -> Index Scan using "11_2_jobs_serial_pkey" on _timescaledb_internal._hyper_4_11_chunk (cost=0.15..7.31 rows=8 width=24) + Output: _hyper_4_11_chunk.job_id, _hyper_4_11_chunk.device_id, _hyper_4_11_chunk.start_time, _hyper_4_11_chunk.end_time + Index Cond: ((_hyper_4_11_chunk.job_id >= 30) AND (_hyper_4_11_chunk.job_id < 90)) + -> Index Scan using "12_3_jobs_serial_pkey" on _timescaledb_internal._hyper_4_12_chunk (cost=0.15..7.31 rows=8 width=24) + Output: 
_hyper_4_12_chunk.job_id, _hyper_4_12_chunk.device_id, _hyper_4_12_chunk.start_time, _hyper_4_12_chunk.end_time + Index Cond: ((_hyper_4_12_chunk.job_id >= 30) AND (_hyper_4_12_chunk.job_id < 90)) +(7 rows) + +EXPLAIN VERBOSE SELECT * FROM jobs_serial WHERE job_id > 90; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Append (cost=0.15..45.84 rows=1046 width=24) + -> Index Scan using "13_4_jobs_serial_pkey" on _timescaledb_internal._hyper_4_13_chunk (cost=0.15..20.30 rows=523 width=24) + Output: _hyper_4_13_chunk.job_id, _hyper_4_13_chunk.device_id, _hyper_4_13_chunk.start_time, _hyper_4_13_chunk.end_time + Index Cond: (_hyper_4_13_chunk.job_id > 90) + -> Index Scan using "14_5_jobs_serial_pkey" on _timescaledb_internal._hyper_4_14_chunk (cost=0.15..20.30 rows=523 width=24) + Output: _hyper_4_14_chunk.job_id, _hyper_4_14_chunk.device_id, _hyper_4_14_chunk.start_time, _hyper_4_14_chunk.end_time + Index Cond: (_hyper_4_14_chunk.job_id > 90) +(7 rows) + +-- Update rows +UPDATE jobs_serial SET end_time = end_time + INTERVAL '1 hour' where job_id = 1; +UPDATE jobs_serial SET end_time = end_time + INTERVAL '1 hour' where job_id = 30; +UPDATE jobs_serial SET end_time = end_time + INTERVAL '1 hour' where job_id = 90; +SELECT start_time, end_time FROM jobs_serial WHERE job_id = 1; + start_time | end_time +------------------------------+------------------------------ + Fri Mar 02 01:00:00 2018 PST | Sat Mar 03 02:00:00 2018 PST +(1 row) + +SELECT start_time, end_time FROM jobs_serial WHERE job_id = 30; + start_time | end_time +------------------------------+------------------------------ + Sat Mar 03 06:00:00 2018 PST | Sun Mar 04 07:00:00 2018 PST +(1 row) + +SELECT start_time, end_time FROM jobs_serial WHERE job_id = 90; + start_time | end_time +------------------------------+------------------------------ + Mon Mar 05 18:00:00 2018 PST | Tue Mar 06 19:00:00 2018 PST +(1 row) + 
+-- Test delete rows +-- Existing tuple counts. We save these and compare with the values +-- after running the delete. +CREATE TABLE counts AS SELECT + (SELECT count(*) FROM jobs_serial) AS total_count, + (SELECT count(*) FROM jobs_serial WHERE job_id < 10) AS remove_count; +-- Perform the delete +DELETE FROM jobs_serial WHERE job_id < 10; +-- Ensure only the intended tuples are deleted. The two counts should be equal. +SELECT + (SELECT total_count FROM counts) - (SELECT count(*) FROM jobs_serial) AS total_removed, + (SELECT remove_count FROM counts) - (SELECT count(*) FROM jobs_serial WHERE job_id < 10) AS matching_removed; + total_removed | matching_removed +---------------+------------------ + 9 | 9 +(1 row) + +DROP TABLE jobs_serial; +DROP TABLE counts; +-- Create and validate hypertable with BIGSERIAL column +CREATE TABLE jobs_big_serial (job_id BIGSERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id)); +SELECT create_hypertable('jobs_big_serial', by_range('job_id', 100)); + create_hypertable +------------------- + (5,t) +(1 row) + +-- Insert data +INSERT INTO jobs_big_serial (device_id, start_time, end_time) +SELECT abs(timestamp_hash(t::timestamp)) % 10, t, t + INTERVAL '1 day' +FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00'::TIMESTAMPTZ,'30 mins')t; +-- Verify #chunks +SELECT count(*) FROM timescaledb_information.chunks; + count +------- + 3 +(1 row) + +-- Get current sequence and verify updating sequence +SELECT currval(pg_get_serial_sequence('jobs_big_serial', 'job_id')); + currval +--------- + 289 +(1 row) + +-- Update sequence value to 500 +SELECT setval(pg_get_serial_sequence('jobs_big_serial', 'job_id'), 500, false); + setval +-------- + 500 +(1 row) + +-- Insert few rows and verify that the next sequence starts from 500 +INSERT INTO jobs_big_serial (device_id, start_time, end_time) +SELECT abs(timestamp_hash(t::timestamp)) % 10, t, t + INTERVAL '1 day' +FROM generate_series('2018-03-09
1:00'::TIMESTAMPTZ, '2018-03-10 1:00'::TIMESTAMPTZ,'30 mins')t; +-- No data should exist for job_id >= 290 to job_id < 500 +SELECT count(*) FROM jobs_big_serial WHERE job_id >= 290 AND job_id < 500; + count +------- + 0 +(1 row) + +-- The new rows should be added with job_id > 500 +SELECT count(*) from jobs_big_serial WHERE job_id > 500; + count +------- + 48 +(1 row) + +-- Verify show_chunks API +SELECT show_chunks('jobs_big_serial', older_than => 100); + show_chunks +----------------------------------------- + _timescaledb_internal._hyper_5_15_chunk +(1 row) + +SELECT show_chunks('jobs_big_serial', newer_than => 200, older_than => 300); + show_chunks +----------------------------------------- + _timescaledb_internal._hyper_5_17_chunk +(1 row) + +SELECT show_chunks('jobs_big_serial', newer_than => 500); + show_chunks +----------------------------------------- + _timescaledb_internal._hyper_5_18_chunk +(1 row) + +-- Verify drop_chunks API +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'jobs_big_serial'; + count +------- + 4 +(1 row) + +SELECT drop_chunks('jobs_big_serial', newer_than => 500); + drop_chunks +----------------------------------------- + _timescaledb_internal._hyper_5_18_chunk +(1 row) + +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'jobs_big_serial'; + count +------- + 3 +(1 row) + +SELECT drop_chunks('jobs_big_serial', newer_than => 200, older_than => 300); + drop_chunks +----------------------------------------- + _timescaledb_internal._hyper_5_17_chunk +(1 row) + +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'jobs_big_serial'; + count +------- + 2 +(1 row) + +DROP TABLE jobs_big_serial; +-- Verify partition function +CREATE TABLE test_table_int(id TEXT, device INTEGER, time TIMESTAMPTZ); +SELECT create_hypertable('test_table_int', by_range('id', 10, partition_func => 'part_func')); +NOTICE: adding not-null constraint to column "id" + create_hypertable 
+------------------- + (6,t) +(1 row) + +INSERT INTO test_table_int VALUES('1', 1, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_int VALUES('10', 10, '01-01-2023 11:00'::TIMESTAMPTZ); +INSERT INTO test_table_int VALUES('29', 100, '01-01-2023 11:00'::TIMESTAMPTZ); +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'test_table_int'; + count +------- + 3 +(1 row) + +DROP TABLE test_table_int; +DROP FUNCTION part_func; +-- Migrate data +CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); +INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t; +SELECT create_hypertable('test_table_int', by_range('id', 10), migrate_data => true); +NOTICE: adding not-null constraint to column "id" +NOTICE: migrating data to chunks + create_hypertable +------------------- + (7,t) +(1 row) + +-- Show default indexes created for hypertables. +SELECT indexname FROM pg_indexes WHERE tablename = 'test_table_int'; + indexname +----------------------- + test_table_int_id_idx +(1 row) + +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'test_table_int'; + count +------- + 6 +(1 row) + +DROP TABLE test_table_int; +-- create_hypertable without default indexes +CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); +SELECT create_hypertable('test_table_int', by_range('id', 10), create_default_indexes => false); +NOTICE: adding not-null constraint to column "id" + create_hypertable +------------------- + (8,t) +(1 row) + +SELECT indexname FROM pg_indexes WHERE tablename = 'test_table_int'; + indexname +----------- +(0 rows) + +DROP TABLE test_table_int; +-- if_not_exists +CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); +SELECT create_hypertable('test_table_int', by_range('id', 10)); +NOTICE: adding not-null constraint to column "id" + create_hypertable +------------------- + (9,t) +(1 row) + +-- No error when 
if_not_exists => true +SELECT create_hypertable('test_table_int', by_range('id', 10), if_not_exists => true); +NOTICE: table "test_table_int" is already a hypertable, skipping + create_hypertable +------------------- + (9,f) +(1 row) + +SELECT * FROM _timescaledb_functions.get_create_command('test_table_int'); + get_create_command +-------------------------------------------------------------------------------------------------------------------- + SELECT create_hypertable('public.test_table_int', 'id', chunk_time_interval => 10, create_default_indexes=>FALSE); +(1 row) + +-- Should throw an error when if_not_exists is not set +\set ON_ERROR_STOP 0 +SELECT create_hypertable('test_table_int', by_range('id', 10)); +ERROR: table "test_table_int" is already a hypertable +\set ON_ERROR_STOP 1 +DROP TABLE test_table_int; +-- Add dimension +CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ); +SELECT create_hypertable('test_table_int', by_range('id', 10), migrate_data => true); +NOTICE: adding not-null constraint to column "id" + create_hypertable +------------------- + (10,t) +(1 row) + +INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t; +SELECT add_dimension('test_table_int', by_hash('device', number_partitions => 2)); + add_dimension +--------------- + (11,t) +(1 row) + +SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int'; + hypertable_name | dimension_number | column_name +-----------------+------------------+------------- + test_table_int | 1 | id + test_table_int | 2 | device +(2 rows) + +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_table_int'; + count +------- + 6 +(1 row) + +SELECT set_partitioning_interval('test_table_int', 5, 'id'); + set_partitioning_interval +--------------------------- + +(1 row) + +SELECT set_number_partitions('test_table_int', 3, 'device'); + 
set_number_partitions +----------------------- + +(1 row) + +SELECT integer_interval, num_partitions + FROM timescaledb_information.dimensions + WHERE column_name in ('id', 'device'); + integer_interval | num_partitions +------------------+---------------- + 5 | + | 3 +(2 rows) + +DROP TABLE test_table_int; +-- Hypertable with time dimension using new API +CREATE TABLE test_time(time TIMESTAMP NOT NULL, device INT, temp FLOAT); +SELECT create_hypertable('test_time', by_range('time')); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +------------------- + (11,t) +(1 row) + +-- Default interval +SELECT time_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time'; + time_interval +--------------- + @ 7 days +(1 row) + +INSERT INTO test_time SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t; +SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_time'; + count +------- + 2 +(1 row) + +SELECT add_dimension('test_time', by_range('device', partition_interval => 2)); +NOTICE: adding not-null constraint to column "device" + add_dimension +--------------- + (13,t) +(1 row) + +SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time'; + hypertable_name | dimension_number | column_name +-----------------+------------------+------------- + test_time | 1 | time + test_time | 2 | device +(2 rows) + +SELECT set_partitioning_interval('test_time', INTERVAL '1 day', 'time'); + set_partitioning_interval +--------------------------- + +(1 row) + +SELECT time_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time' AND column_name = 'time'; + time_interval +--------------- + @ 1 day +(1 row) + +DROP TABLE test_time; diff --git a/tsl/test/shared/expected/extension.out 
b/tsl/test/shared/expected/extension.out index 23d851e5f5d..6bbc3104a57 100644 --- a/tsl/test/shared/expected/extension.out +++ b/tsl/test/shared/expected/extension.out @@ -63,6 +63,8 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text _timescaledb_functions.data_node_compressed_chunk_stats(name,name,name) _timescaledb_functions.data_node_hypertable_info(name,name,name) _timescaledb_functions.data_node_index_size(name,name,name) + _timescaledb_functions.dimension_info_in(cstring) + _timescaledb_functions.dimension_info_out(_timescaledb_internal.dimension_info) _timescaledb_functions.drop_chunk(regclass) _timescaledb_functions.drop_dist_ht_invalidation_trigger(integer) _timescaledb_functions.drop_stale_chunks(name,integer[]) @@ -260,6 +262,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text) add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text) add_data_node(name,text,name,integer,boolean,boolean,text) + add_dimension(regclass,_timescaledb_internal.dimension_info,boolean) add_dimension(regclass,name,integer,anyelement,regproc,boolean) add_job(regproc,interval,jsonb,timestamp with time zone,boolean,regproc,boolean,text) add_reorder_policy(regclass,name,boolean,timestamp with time zone,text) @@ -269,12 +272,15 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text approximate_row_count(regclass) attach_data_node(name,regclass,boolean,boolean) attach_tablespace(name,regclass,boolean) + by_hash(name,integer,regproc) + by_range(name,anyelement,regproc) cagg_migrate(regclass,boolean,boolean) chunk_compression_stats(regclass) chunks_detailed_size(regclass) compress_chunk(regclass,boolean) create_distributed_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc,regproc,integer,name[]) 
create_distributed_restore_point(text) + create_hypertable(regclass,_timescaledb_internal.dimension_info,boolean,boolean,boolean) create_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc,regproc,integer,name[],boolean) decompress_chunk(regclass,boolean) delete_data_node(name,boolean,boolean,boolean,boolean) @@ -310,6 +316,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text set_chunk_time_interval(regclass,anyelement,name) set_integer_now_func(regclass,regproc,boolean) set_number_partitions(regclass,integer,name) + set_partitioning_interval(regclass,anyelement,name) set_replication_factor(regclass,integer) show_chunks(regclass,"any","any") show_tablespaces(regclass) diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index b6e8f6aa354..640e849f14a 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -83,6 +83,7 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) debug_notice.sql decompress_vector_qual.sql deparse.sql + hypertable_generalization.sql insert_memory_usage.sql information_view_chunk_count.sql read_only.sql diff --git a/tsl/test/sql/hypertable_generalization.sql b/tsl/test/sql/hypertable_generalization.sql new file mode 100644 index 00000000000..43748160d41 --- /dev/null +++ b/tsl/test/sql/hypertable_generalization.sql @@ -0,0 +1,257 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+
+CREATE OR REPLACE FUNCTION part_func(id TEXT)
+    RETURNS INTEGER LANGUAGE PLPGSQL IMMUTABLE AS
+$BODY$
+DECLARE
+    retval INTEGER;
+BEGIN
+    retval := CAST(id AS INTEGER);
+    RETURN retval;
+END
+$BODY$;
+
+SELECT by_range('id');
+SELECT by_range('id', partition_func => 'part_func');
+SELECT by_range('id', '1 week'::interval);
+SELECT by_range('id', '1 week'::interval, 'part_func'::regproc);
+SELECT by_hash('id', 3);
+SELECT by_hash('id', 3, partition_func => 'part_func');
+
+\set ON_ERROR_STOP 0
+SELECT 'hash//id//3//-'::_timescaledb_internal.dimension_info;
+\set ON_ERROR_STOP 1
+
+-- Validate generalized hypertable for smallint
+CREATE TABLE test_table_smallint(id SMALLINT, device INTEGER, time TIMESTAMPTZ);
+SELECT create_hypertable('test_table_smallint', by_range('id'));
+
+-- default interval
+SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_smallint';
+
+-- Add data around the default partition interval boundary (10000)
+INSERT INTO test_table_smallint VALUES (1, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_smallint VALUES (9999, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_smallint VALUES (10000, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_smallint VALUES (20000, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+
+-- Number of chunks
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_table_smallint';
+
+-- Validate generalized hypertable for int
+CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ);
+SELECT create_hypertable('test_table_int', by_range('id'));
+
+-- Default interval
+SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int';
+
+-- Add data
+INSERT INTO test_table_int VALUES (1, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_int VALUES (99999, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_int VALUES (100000, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_int VALUES (200000, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+
+-- Number of chunks
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_table_int';
+
+-- Validate generalized hypertable for bigint
+CREATE TABLE test_table_bigint(id BIGINT, device INTEGER, time TIMESTAMPTZ);
+SELECT create_hypertable('test_table_bigint', by_range('id'));
+
+-- Default interval
+SELECT integer_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_bigint';
+
+-- Add data
+INSERT INTO test_table_bigint VALUES (1, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_bigint VALUES (999999, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_bigint VALUES (1000000, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_bigint VALUES (2000000, 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+
+-- Number of chunks
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_table_bigint';
+
+DROP TABLE test_table_smallint;
+DROP TABLE test_table_int;
+DROP TABLE test_table_bigint;
+
+-- Create hypertable with SERIAL column
+CREATE TABLE jobs_serial (job_id SERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id));
+SELECT create_hypertable('jobs_serial', by_range('job_id', partition_interval => 30));
+
+-- Insert data
+INSERT INTO jobs_serial (device_id, start_time, end_time)
+SELECT abs(timestamp_hash(t::timestamp)) % 10, t, t + INTERVAL '1 day'
+FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00':: TIMESTAMPTZ,'1 hour')t;
+
+-- Verify chunk pruning
+EXPLAIN VERBOSE SELECT * FROM jobs_serial WHERE job_id < 30;
+EXPLAIN VERBOSE SELECT * FROM jobs_serial WHERE job_id >= 30 AND job_id < 90;
+EXPLAIN VERBOSE SELECT * FROM jobs_serial WHERE job_id > 90;
+
+-- Update rows
+UPDATE jobs_serial SET end_time = end_time + INTERVAL '1 hour' where job_id = 1;
+UPDATE jobs_serial SET end_time = end_time + INTERVAL '1 hour' where job_id = 30;
+UPDATE jobs_serial SET end_time = end_time + INTERVAL '1 hour' where job_id = 90;
+
+SELECT start_time, end_time FROM jobs_serial WHERE job_id = 1;
+SELECT start_time, end_time FROM jobs_serial WHERE job_id = 30;
+SELECT start_time, end_time FROM jobs_serial WHERE job_id = 90;
+
+-- Test delete rows
+
+-- Existing tuple counts. We save these and compare them with the values
+-- after running the delete.
+CREATE TABLE counts AS SELECT
+    (SELECT count(*) FROM jobs_serial) AS total_count,
+    (SELECT count(*) FROM jobs_serial WHERE job_id < 10) AS remove_count;
+
+-- Perform the delete
+DELETE FROM jobs_serial WHERE job_id < 10;
+
+-- Ensure only the intended tuples are deleted. The two counts should be equal.
+SELECT
+    (SELECT total_count FROM counts) - (SELECT count(*) FROM jobs_serial) AS total_removed,
+    (SELECT remove_count FROM counts) - (SELECT count(*) FROM jobs_serial WHERE job_id < 10) AS matching_removed;
+
+DROP TABLE jobs_serial;
+DROP TABLE counts;
+
+-- Create and validate hypertable with BIGSERIAL column
+CREATE TABLE jobs_big_serial (job_id BIGSERIAL, device_id INTEGER, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, PRIMARY KEY (job_id));
+SELECT create_hypertable('jobs_big_serial', by_range('job_id', 100));
+
+-- Insert data
+INSERT INTO jobs_big_serial (device_id, start_time, end_time)
+SELECT abs(timestamp_hash(t::timestamp)) % 10, t, t + INTERVAL '1 day'
+FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00'::TIMESTAMPTZ,'30 mins')t;
+
+-- Verify the number of chunks created
+SELECT count(*) FROM timescaledb_information.chunks;
+
+-- Get the current sequence value and verify that it can be updated
+SELECT currval(pg_get_serial_sequence('jobs_big_serial', 'job_id'));
+
+-- Update sequence value to 500
+SELECT setval(pg_get_serial_sequence('jobs_big_serial', 'job_id'), 500, false);
+
+-- Insert a few rows and verify that the sequence continues from 500
+INSERT INTO jobs_big_serial (device_id, start_time, end_time)
+SELECT abs(timestamp_hash(t::timestamp)) % 10, t, t + INTERVAL '1 day'
+FROM generate_series('2018-03-09 1:00'::TIMESTAMPTZ, '2018-03-10 1:00'::TIMESTAMPTZ,'30 mins')t;
+
+-- No data should exist in the range job_id >= 290 to job_id < 500
+SELECT count(*) FROM jobs_big_serial WHERE job_id >= 290 AND job_id < 500;
+
+-- The new rows should be added with job_id > 500
+SELECT count(*) from jobs_big_serial WHERE job_id > 500;
+
+-- Verify show_chunks API
+SELECT show_chunks('jobs_big_serial', older_than => 100);
+SELECT show_chunks('jobs_big_serial', newer_than => 200, older_than => 300);
+SELECT show_chunks('jobs_big_serial', newer_than => 500);
+
+-- Verify drop_chunks API
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'jobs_big_serial';
+
+SELECT drop_chunks('jobs_big_serial', newer_than => 500);
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'jobs_big_serial';
+
+SELECT drop_chunks('jobs_big_serial', newer_than => 200, older_than => 300);
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'jobs_big_serial';
+
+DROP TABLE jobs_big_serial;
+
+-- Verify partition function
+CREATE TABLE test_table_int(id TEXT, device INTEGER, time TIMESTAMPTZ);
+SELECT create_hypertable('test_table_int', by_range('id', 10, partition_func => 'part_func'));
+
+INSERT INTO test_table_int VALUES('1', 1, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_int VALUES('10', 10, '01-01-2023 11:00'::TIMESTAMPTZ);
+INSERT INTO test_table_int VALUES('29', 100, '01-01-2023 11:00'::TIMESTAMPTZ);
+
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'test_table_int';
+
+DROP TABLE test_table_int;
+DROP FUNCTION part_func;
+
+-- Migrate data
+CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ);
+INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t;
+
+SELECT create_hypertable('test_table_int', by_range('id', 10), migrate_data => true);
+
+-- Show default indexes created for hypertables.
+SELECT indexname FROM pg_indexes WHERE tablename = 'test_table_int';
+
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name = 'test_table_int';
+
+DROP TABLE test_table_int;
+
+-- create_hypertable without default indexes
+CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ);
+
+SELECT create_hypertable('test_table_int', by_range('id', 10), create_default_indexes => false);
+
+SELECT indexname FROM pg_indexes WHERE tablename = 'test_table_int';
+
+DROP TABLE test_table_int;
+
+-- if_not_exists
+CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ);
+
+SELECT create_hypertable('test_table_int', by_range('id', 10));
+
+-- No error when if_not_exists => true
+SELECT create_hypertable('test_table_int', by_range('id', 10), if_not_exists => true);
+SELECT * FROM _timescaledb_functions.get_create_command('test_table_int');
+
+-- Should throw an error when if_not_exists is not set
+\set ON_ERROR_STOP 0
+SELECT create_hypertable('test_table_int', by_range('id', 10));
+\set ON_ERROR_STOP 1
+
+DROP TABLE test_table_int;
+
+-- Add dimension
+CREATE TABLE test_table_int(id INTEGER, device INTEGER, time TIMESTAMPTZ);
+SELECT create_hypertable('test_table_int', by_range('id', 10), migrate_data => true);
+
+INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t;
+
+SELECT add_dimension('test_table_int', by_hash('device', number_partitions => 2));
+
+SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int';
+
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_table_int';
+
+SELECT set_partitioning_interval('test_table_int', 5, 'id');
+SELECT set_number_partitions('test_table_int', 3, 'device');
+
+SELECT integer_interval, num_partitions
+    FROM timescaledb_information.dimensions
+    WHERE column_name in ('id', 'device');
+
+DROP TABLE test_table_int;
+
+-- Hypertable with time dimension using new API
+CREATE TABLE test_time(time TIMESTAMP NOT NULL, device INT, temp FLOAT);
+SELECT create_hypertable('test_time', by_range('time'));
+
+-- Default interval
+SELECT time_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time';
+
+INSERT INTO test_time SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
+
+SELECT count(*) FROM timescaledb_information.chunks WHERE hypertable_name='test_time';
+
+SELECT add_dimension('test_time', by_range('device', partition_interval => 2));
+
+SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time';
+
+SELECT set_partitioning_interval('test_time', INTERVAL '1 day', 'time');
+
+SELECT time_interval FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_time' AND column_name = 'time';
+
+DROP TABLE test_time;