From ac761085a10b5d75ae7aae1f46c6056d0ee5b500 Mon Sep 17 00:00:00 2001 From: Konstantina Skovola Date: Thu, 31 Aug 2023 11:25:47 +0300 Subject: [PATCH] Add API function for updating OSM chunk ranges This commit introduces a function `hypertable_osm_range_update` in the _timescaledb_functions schema. This function is meant to serve as an API call for the OSM extension to update the time range of a hypertable's OSM chunk with the min and max values present in the contiguous time range its tiered chunks span. If the range is not contiguous, then it must be set to the invalid range an OSM chunk is assigned upon creation. A new status field is also introduced in the hypertable catalog table to keep track of whether the ranges covered by tiered and non-tiered chunks overlap. When there is no overlap detected then it is possible to apply the Ordered Append optimization in the presence of OSM chunks. --- .unreleased/PR_6036 | 1 + cmake/ScriptFiles.cmake | 3 +- sql/osm_api.sql | 14 + sql/pre_install/tables.sql | 1 + sql/updates/latest-dev.sql | 151 ++++++++ sql/updates/reverse-dev.sql | 164 +++++++++ src/chunk.c | 23 ++ src/chunk.h | 2 + src/dimension_slice.c | 75 ++++ src/dimension_slice.h | 6 + src/hypertable.c | 129 +++++++ src/nodes/chunk_dispatch/chunk_dispatch.c | 27 ++ src/planner/planner.c | 10 +- src/ts_catalog/catalog.h | 23 ++ test/expected/alter.out | 6 +- test/expected/alternate_users.out | 12 +- test/expected/create_hypertable.out | 18 +- test/expected/ddl-13.out | 8 +- test/expected/ddl-14.out | 8 +- test/expected/ddl-15.out | 8 +- test/expected/drop_extension.out | 12 +- test/expected/drop_hypertable.out | 16 +- test/expected/drop_owned.out | 18 +- test/expected/drop_rename_hypertable.out | 16 +- test/expected/drop_schema.out | 20 +- test/expected/dump_meta.out | 6 +- test/expected/pg_dump.out | 12 +- test/expected/relocate_extension.out | 10 +- test/expected/truncate.out | 12 +- tsl/test/expected/chunk_utils_internal.out | 391 +++++++++++++++++++-- 
tsl/test/shared/expected/extension.out | 1 + tsl/test/sql/chunk_utils_internal.sql | 159 ++++++++- 32 files changed, 1230 insertions(+), 132 deletions(-) create mode 100644 .unreleased/PR_6036 create mode 100644 sql/osm_api.sql diff --git a/.unreleased/PR_6036 b/.unreleased/PR_6036 new file mode 100644 index 00000000000..313457f45fb --- /dev/null +++ b/.unreleased/PR_6036 @@ -0,0 +1 @@ +Implements: #6036 Add API function for updating OSM chunk ranges diff --git a/cmake/ScriptFiles.cmake b/cmake/ScriptFiles.cmake index 5a07a7adc12..657d19cf8fd 100644 --- a/cmake/ScriptFiles.cmake +++ b/cmake/ScriptFiles.cmake @@ -59,7 +59,8 @@ set(SOURCE_FILES policy_internal.sql cagg_utils.sql cagg_migrate.sql - job_error_log_retention.sql) + job_error_log_retention.sql + osm_api.sql) if(ENABLE_DEBUG_UTILS AND CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND SOURCE_FILES debug_utils.sql) diff --git a/sql/osm_api.sql b/sql/osm_api.sql new file mode 100644 index 00000000000..b0f7bdcc643 --- /dev/null +++ b/sql/osm_api.sql @@ -0,0 +1,14 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. + +-- This function updates the dimension slice range stored in the catalog with the min and max +-- values that the OSM chunk contains. 
Since there is only one OSM chunk per hypertable with +-- only a time dimension, the hypertable is used to determine the corresponding slice +CREATE OR REPLACE FUNCTION _timescaledb_functions.hypertable_osm_range_update( + hypertable REGCLASS, + range_start ANYELEMENT = NULL::bigint, + range_end ANYELEMENT = NULL, + empty BOOL = false +) RETURNS BOOL AS '@MODULE_PATHNAME@', +'ts_hypertable_osm_range_update' LANGUAGE C VOLATILE; diff --git a/sql/pre_install/tables.sql b/sql/pre_install/tables.sql index 6826dc5a531..0f4fb67299f 100644 --- a/sql/pre_install/tables.sql +++ b/sql/pre_install/tables.sql @@ -52,6 +52,7 @@ CREATE TABLE _timescaledb_catalog.hypertable ( compression_state smallint NOT NULL DEFAULT 0, compressed_hypertable_id integer, replication_factor smallint NULL, + status int NOT NULL DEFAULT 0, -- table constraints CONSTRAINT hypertable_pkey PRIMARY KEY (id), CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix), diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index 01b65d15383..ca1f10bfc9b 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -174,3 +174,154 @@ FROM _timescaledb_catalog.chunk_constraint cc INNER JOIN _timescaledb_catalog.chunk c ON c.id = cc.chunk_id AND c.osm_chunk WHERE cc.dimension_slice_id = ds.id AND ds.range_start <> 9223372036854775806; +-- OSM support - table must be rebuilt to ensure consistent attribute numbers +-- we cannot just ALTER TABLE .. 
ADD COLUMN +ALTER TABLE _timescaledb_config.bgw_job + DROP CONSTRAINT bgw_job_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk + DROP CONSTRAINT chunk_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_index + DROP CONSTRAINT chunk_index_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_agg + DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey, + DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + DROP CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.dimension + DROP CONSTRAINT dimension_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable + DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable_compression + DROP CONSTRAINT hypertable_compression_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable_data_node + DROP CONSTRAINT hypertable_data_node_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.tablespace + DROP CONSTRAINT tablespace_hypertable_id_fkey; + +DROP VIEW IF EXISTS timescaledb_information.hypertables; +DROP VIEW IF EXISTS timescaledb_information.job_stats; +DROP VIEW IF EXISTS timescaledb_information.jobs; +DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates; +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS timescaledb_information.dimensions; +DROP VIEW IF EXISTS timescaledb_information.compression_settings; +DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size; +DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats; +DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; +DROP VIEW IF EXISTS timescaledb_experimental.policies; + +-- recreate table +CREATE TABLE 
_timescaledb_catalog.hypertable_tmp AS SELECT * FROM _timescaledb_catalog.hypertable; +CREATE TABLE _timescaledb_catalog.tmp_hypertable_seq_value AS SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq; + +SET timescaledb.restoring = on; -- must disable the hooks otherwise we can't do anything without the table _timescaledb_catalog.hypertable + +DROP TABLE _timescaledb_catalog.hypertable; + +CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1; +SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_catalog.tmp_hypertable_seq_value; +DROP TABLE _timescaledb_catalog.tmp_hypertable_seq_value; + +CREATE TABLE _timescaledb_catalog.hypertable ( + id INTEGER PRIMARY KEY NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'), + schema_name name NOT NULL, + table_name name NOT NULL, + associated_schema_name name NOT NULL, + associated_table_prefix name NOT NULL, + num_dimensions smallint NOT NULL, + chunk_sizing_func_schema name NOT NULL, + chunk_sizing_func_name name NOT NULL, + chunk_target_size bigint NOT NULL, -- size in bytes + compression_state smallint NOT NULL DEFAULT 0, + compressed_hypertable_id integer, + replication_factor smallint NULL, + status integer NOT NULL DEFAULT 0 +); + +SET timescaledb.restoring = off; + +INSERT INTO _timescaledb_catalog.hypertable ( + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id, + replication_factor +) +SELECT + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + 
compressed_hypertable_id, + replication_factor +FROM + _timescaledb_catalog.hypertable_tmp +ORDER BY id; + +UPDATE _timescaledb_catalog.hypertable h +SET status = 3 +WHERE EXISTS ( + SELECT FROM _timescaledb_catalog.chunk c WHERE c.osm_chunk AND c.hypertable_id = h.id +); + +ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', 'WHERE id >= 1'); +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', ''); + +GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC; + +DROP TABLE _timescaledb_catalog.hypertable_tmp; +-- now add any constraints +ALTER TABLE _timescaledb_catalog.hypertable + ADD CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix), + ADD CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name), + ADD CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'), + ADD CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2), + ADD CONSTRAINT hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0), + ADD CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)), + ADD CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1), + ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); + +GRANT SELECT ON TABLE _timescaledb_catalog.hypertable TO PUBLIC; + +-- 3. 
reestablish constraints on other tables +ALTER TABLE _timescaledb_config.bgw_job + ADD CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk + ADD CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE _timescaledb_catalog.chunk_index + ADD CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_agg + ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE, + ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + ADD CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.dimension + ADD CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_compression + ADD CONSTRAINT hypertable_compression_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_data_node + ADD CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE 
_timescaledb_catalog.tablespace + ADD CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 0f17c6c39b5..0e4b8b779eb 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -273,3 +273,167 @@ ALTER FUNCTION _timescaledb_functions.finalize_agg_sfunc(internal,text,name,name ALTER FUNCTION _timescaledb_functions.partialize_agg(anyelement) SET SCHEMA _timescaledb_internal; ALTER AGGREGATE _timescaledb_functions.finalize_agg(text,name,name,name[][],bytea,anyelement) SET SCHEMA _timescaledb_internal; +DROP FUNCTION _timescaledb_functions.hypertable_osm_range_update(regclass, anyelement, anyelement, boolean); + +-- recreate the _timescaledb_catalog.hypertable table as new field was added +-- 1. drop CONSTRAINTS from other tables referencing the existing one +ALTER TABLE _timescaledb_config.bgw_job + DROP CONSTRAINT bgw_job_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk + DROP CONSTRAINT chunk_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_index + DROP CONSTRAINT chunk_index_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_agg + DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey, + DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + DROP CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.dimension + DROP CONSTRAINT dimension_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable + DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable_compression + DROP CONSTRAINT hypertable_compression_hypertable_id_fkey; +ALTER TABLE 
_timescaledb_catalog.hypertable_data_node + DROP CONSTRAINT hypertable_data_node_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.tablespace + DROP CONSTRAINT tablespace_hypertable_id_fkey; + +-- drop dependent views +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.hypertables; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.job_stats; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.jobs; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.continuous_aggregates; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.chunks; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.dimensions; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.compression_settings; +ALTER EXTENSION timescaledb DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; +ALTER EXTENSION timescaledb DROP VIEW _timescaledb_internal.compressed_chunk_stats; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_experimental.chunk_replication_status; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_experimental.policies; + +DROP VIEW timescaledb_information.hypertables; +DROP VIEW timescaledb_information.job_stats; +DROP VIEW timescaledb_information.jobs; +DROP VIEW timescaledb_information.continuous_aggregates; +DROP VIEW timescaledb_information.chunks; +DROP VIEW timescaledb_information.dimensions; +DROP VIEW timescaledb_information.compression_settings; +DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; +DROP VIEW _timescaledb_internal.compressed_chunk_stats; +DROP VIEW timescaledb_experimental.chunk_replication_status; +DROP VIEW timescaledb_experimental.policies; + +-- recreate table +CREATE TABLE _timescaledb_catalog.hypertable_tmp AS SELECT * FROM _timescaledb_catalog.hypertable; +CREATE TABLE _timescaledb_catalog.tmp_hypertable_seq_value AS SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; 
+ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq; + +SET timescaledb.restoring = on; -- must disable the hooks otherwise we can't do anything without the table _timescaledb_catalog.hypertable + +DROP TABLE _timescaledb_catalog.hypertable; + +CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1; +SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_catalog.tmp_hypertable_seq_value; +DROP TABLE _timescaledb_catalog.tmp_hypertable_seq_value; + +CREATE TABLE _timescaledb_catalog.hypertable ( + id INTEGER PRIMARY KEY NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'), + schema_name name NOT NULL, + table_name name NOT NULL, + associated_schema_name name NOT NULL, + associated_table_prefix name NOT NULL, + num_dimensions smallint NOT NULL, + chunk_sizing_func_schema name NOT NULL, + chunk_sizing_func_name name NOT NULL, + chunk_target_size bigint NOT NULL, -- size in bytes + compression_state smallint NOT NULL DEFAULT 0, + compressed_hypertable_id integer, + replication_factor smallint NULL +); + +SET timescaledb.restoring = off; + +INSERT INTO _timescaledb_catalog.hypertable ( + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id, + replication_factor +) +SELECT + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id, + replication_factor +FROM + _timescaledb_catalog.hypertable_tmp +ORDER BY id; + +ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', 'WHERE id >= 1'); +SELECT 
pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', ''); + +GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC; + +DROP TABLE _timescaledb_catalog.hypertable_tmp; +-- now add any constraints +ALTER TABLE _timescaledb_catalog.hypertable + -- ADD CONSTRAINT hypertable_pkey PRIMARY KEY (id), + ADD CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix), + ADD CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name), + ADD CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'), + -- internal compressed hypertables have compression state = 2 + ADD CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2), + ADD CONSTRAINT hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0), + ADD CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)), + -- replication_factor NULL: regular hypertable + -- replication_factor > 0: distributed hypertable on access node + -- replication_factor -1: distributed hypertable on data node, which is part of a larger table + ADD CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1), + ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); + +GRANT SELECT ON TABLE _timescaledb_catalog.hypertable TO PUBLIC; + +-- 3. 
reestablish constraints on other tables +ALTER TABLE _timescaledb_config.bgw_job + ADD CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk + ADD CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE _timescaledb_catalog.chunk_index + ADD CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_agg + ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE, + ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + ADD CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.dimension + ADD CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_compression + ADD CONSTRAINT hypertable_compression_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_data_node + ADD CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE 
_timescaledb_catalog.tablespace + ADD CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; diff --git a/src/chunk.c b/src/chunk.c index bd7ff145614..4d37417e40a 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -4612,6 +4612,13 @@ add_foreign_table_as_chunk(Oid relid, Hypertable *parent_ht) ts_chunk_constraints_add_dimension_constraints(chunk->constraints, chunk->fd.id, chunk->cube); ts_chunk_constraints_insert_metadata(chunk->constraints); chunk_add_inheritance(chunk, parent_ht); + /* + * Update hypertable entry with tiering status information. + * Noncontiguous flag is not set since the chunk is empty upon creation, + * with an invalid range assigned, so ordered append should be allowed. + */ + parent_ht->fd.status = ts_set_flags_32(parent_ht->fd.status, HYPERTABLE_STATUS_OSM); + ts_hypertable_update(parent_ht); } void @@ -4867,3 +4874,19 @@ ts_chunk_get_osm_chunk_id(int hypertable_id) return chunk_id; } + +/* Upon creation, OSM chunks are assigned an invalid range [INT64_MAX -1, infinity) */ +bool +ts_osm_chunk_range_is_invalid(int64 range_start, int64 range_end) +{ + return ((range_end == PG_INT64_MAX) && (range_start == range_end - 1)); +} + +int32 +ts_chunk_get_osm_slice_id(int32 chunk_id, int32 time_dim_id) +{ + Chunk *chunk = ts_chunk_get_by_id(chunk_id, true); + const DimensionSlice *ds = ts_hypercube_get_slice_by_dimension_id(chunk->cube, time_dim_id); + const int slice_id = ds->fd.id; + return slice_id; +} diff --git a/src/chunk.h b/src/chunk.h index cc3c82fa45f..892bb06ab43 100644 --- a/src/chunk.h +++ b/src/chunk.h @@ -309,5 +309,7 @@ extern TSDLLEXPORT void ts_chunk_merge_on_dimension(const Hypertable *ht, Chunk #define CHUNK_STATUS_COMPRESSED_PARTIAL 8 extern TSDLLEXPORT bool ts_chunk_clear_status(Chunk *chunk, int32 status); +extern bool ts_osm_chunk_range_is_invalid(int64 range_start, int64 range_end); +extern int32 ts_chunk_get_osm_slice_id(int32 chunk_id, int32 
time_dim_id); #endif /* TIMESCALEDB_CHUNK_H */ diff --git a/src/dimension_slice.c b/src/dimension_slice.c index 64fa6cc0e8b..0551e47146b 100644 --- a/src/dimension_slice.c +++ b/src/dimension_slice.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -1148,3 +1149,77 @@ ts_dimension_slice_get_chunkids_to_compress(int32 dimension_id, StrategyNumber s return chunk_ids; } + +/* This function checks for overlap between the range we want to update + for the OSM chunk and the chunks currently in timescaledb (not managed by OSM) + */ +bool +ts_osm_chunk_range_overlaps(int32 osm_dimension_slice_id, int32 dimension_id, int64 range_start, + int64 range_end) +{ + bool res; + DimensionVec *vec = dimension_slice_collision_scan(dimension_id, range_start, range_end); + /* there is only one dimension slice for the OSM chunk. The OSM chunk may not + * necessarily appear in the list of overlapping ranges because when first tiered, + * it is given a range [max, infinity) + */ + if (vec->num_slices >= 2 || + (vec->num_slices == 1 && vec->slices[0]->fd.id != osm_dimension_slice_id)) + res = true; + else + res = false; + pfree(vec); + return res; +} + +static ScanTupleResult +dimension_slice_tuple_update(TupleInfo *ti, void *data) +{ + bool should_free; + HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free); + FormData_dimension_slice *fd = (FormData_dimension_slice *) data; + + Datum values[Natts_dimension_slice] = { 0 }; + bool isnull[Natts_dimension_slice] = { 0 }; + bool doReplace[Natts_dimension_slice] = { 0 }; + + values[AttrNumberGetAttrOffset(Anum_dimension_slice_range_start)] = + Int64GetDatum(fd->range_start); + doReplace[AttrNumberGetAttrOffset(Anum_dimension_slice_range_start)] = true; + + values[AttrNumberGetAttrOffset(Anum_dimension_slice_range_end)] = Int64GetDatum(fd->range_end); + doReplace[AttrNumberGetAttrOffset(Anum_dimension_slice_range_end)] = true; + + HeapTuple new_tuple = + heap_modify_tuple(tuple, 
ts_scanner_get_tupledesc(ti), values, isnull, doReplace); + + ts_catalog_update(ti->scanrel, new_tuple); + + heap_freetuple(new_tuple); + if (should_free) + heap_freetuple(tuple); + + return SCAN_DONE; +} + +int +ts_dimension_slice_update_by_id(int32 dimension_slice_id, FormData_dimension_slice *fd_slice) +{ + ScanKeyData scankey[1]; + + ScanKeyInit(&scankey[0], + Anum_dimension_slice_id_idx_id, + BTEqualStrategyNumber, + F_INT4EQ, + Int32GetDatum(dimension_slice_id)); + + return dimension_slice_scan_limit_internal(DIMENSION_SLICE_ID_IDX, + scankey, + 1, + dimension_slice_tuple_update, + fd_slice, + 1, + RowExclusiveLock, + NULL, + CurrentMemoryContext); +} diff --git a/src/dimension_slice.h b/src/dimension_slice.h index 7790899ebc8..53922d1b079 100644 --- a/src/dimension_slice.h +++ b/src/dimension_slice.h @@ -101,6 +101,12 @@ extern int ts_dimension_slice_scan_iterator_set_range(ScanIterator *it, int32 di int64 start_value, StrategyNumber end_strategy, int64 end_value); +extern bool ts_osm_chunk_range_overlaps(int32 osm_dimension_slice_id, int32 dimension_id, + int64 range_start, int64 range_end); + +extern int ts_dimension_slice_update_by_id(int32 dimension_slice_id, + FormData_dimension_slice *fd_slice); + #define dimension_slice_insert(slice) ts_dimension_slice_insert_multi(&(slice), 1) #define dimension_slice_scan(dimension_id, coordinate, tuplock) \ diff --git a/src/hypertable.c b/src/hypertable.c index 2c40bef16d1..2ff9b2df4cd 100644 --- a/src/hypertable.c +++ b/src/hypertable.c @@ -172,6 +172,7 @@ hypertable_formdata_make_tuple(const FormData_hypertable *fd, TupleDesc desc) else values[AttrNumberGetAttrOffset(Anum_hypertable_replication_factor)] = Int16GetDatum(fd->replication_factor); + values[AttrNumberGetAttrOffset(Anum_hypertable_status)] = Int32GetDatum(fd->status); return heap_form_tuple(desc, values, nulls); } @@ -197,6 +198,7 @@ ts_hypertable_formdata_fill(FormData_hypertable *fd, const TupleInfo *ti) 
Assert(!nulls[AttrNumberGetAttrOffset(Anum_hypertable_chunk_sizing_func_name)]); Assert(!nulls[AttrNumberGetAttrOffset(Anum_hypertable_chunk_target_size)]); Assert(!nulls[AttrNumberGetAttrOffset(Anum_hypertable_compression_state)]); + Assert(!nulls[AttrNumberGetAttrOffset(Anum_hypertable_status)]); fd->id = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_hypertable_id)]); memcpy(&fd->schema_name, @@ -237,6 +239,7 @@ ts_hypertable_formdata_fill(FormData_hypertable *fd, const TupleInfo *ti) else fd->replication_factor = DatumGetInt16(values[AttrNumberGetAttrOffset(Anum_hypertable_replication_factor)]); + fd->status = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_hypertable_status)]); if (should_free) heap_freetuple(tuple); @@ -1003,6 +1006,9 @@ hypertable_insert(int32 hypertable_id, Name schema_name, Name table_name, /* when creating a hypertable, there is never an associated compressed dual */ fd.compressed_hypertable_id = INVALID_HYPERTABLE_ID; + /* new hypertable does not have OSM chunk */ + fd.status = HYPERTABLE_STATUS_DEFAULT; + /* finally, set replication factor */ fd.replication_factor = replication_factor; @@ -3014,3 +3020,126 @@ ts_hypertable_update_dimension_partitions(const Hypertable *ht) return false; } + +/* + * hypertable_osm_range_update + * 0 hypertable REGCLASS, + * 1 range_start=NULL::bigint, + * 2 range_end=NULL, + * 3 empty=false + * If empty is set to true then the range will be set to invalid range + * but the overlap flag will be unset, indicating that no data is managed + * by OSM and therefore timescaledb optimizations can be applied. + */ +TS_FUNCTION_INFO_V1(ts_hypertable_osm_range_update); +Datum +ts_hypertable_osm_range_update(PG_FUNCTION_ARGS) +{ + Oid relid = PG_ARGISNULL(0) ? 
InvalidOid : PG_GETARG_OID(0); + Hypertable *ht; + const Dimension *time_dim; + Cache *hcache; + + Oid time_type; /* required for resolving the argument types, should match the hypertable + partitioning column type */ + + hcache = ts_hypertable_cache_pin(); + ht = ts_resolve_hypertable_from_table_or_cagg(hcache, relid, true); + Assert(ht != NULL); + time_dim = hyperspace_get_open_dimension(ht->space, 0); + + if (time_dim == NULL) + elog(ERROR, + "could not find time dimension for hypertable %s.%s", + quote_identifier(NameStr(ht->fd.schema_name)), + quote_identifier(NameStr(ht->fd.table_name))); + + time_type = ts_dimension_get_partition_type(time_dim); + + int32 osm_chunk_id = ts_chunk_get_osm_chunk_id(ht->fd.id); + if (osm_chunk_id == INVALID_CHUNK_ID) + elog(ERROR, + "no OSM chunk found for hypertable %s.%s", + quote_identifier(NameStr(ht->fd.schema_name)), + quote_identifier(NameStr(ht->fd.table_name))); + + int32 dimension_slice_id = ts_chunk_get_osm_slice_id(osm_chunk_id, time_dim->fd.id); + + /* + * range_start, range_end arguments must be converted to internal representation + * a NULL start value is interpreted as INT64_MAX - 1 and a NULL end value is + * interpreted as INT64_MAX. 
+ * Passing both start and end NULL values will reset the range to the default range an + * OSM chunk is given upon creation, which is [INT64_MAX - 1, INT64_MAX] + */ + if ((PG_ARGISNULL(1) && !PG_ARGISNULL(2)) || (!PG_ARGISNULL(1) && PG_ARGISNULL(2))) + elog(ERROR, "range_start and range_end parameters must be both NULL or both non-NULL"); + + int64 range_start_internal, range_end_internal; + if (PG_ARGISNULL(1)) + range_start_internal = PG_INT64_MAX - 1; + else + range_start_internal = ts_time_value_to_internal(PG_GETARG_DATUM(1), time_type); + if (PG_ARGISNULL(2)) + range_end_internal = PG_INT64_MAX; + else + range_end_internal = ts_time_value_to_internal(PG_GETARG_DATUM(2), time_type); + + if (range_start_internal > range_end_internal) + ereport(ERROR, errmsg("dimension slice range_end cannot be less than range_start")); + + bool osm_chunk_empty = PG_GETARG_BOOL(3); + + bool overlap = false, range_invalid = false; + + ScanTupLock tuplock = { + .lockmode = LockTupleExclusive, + .waitpolicy = LockWaitBlock, + }; + DimensionSlice *slice = + ts_dimension_slice_scan_by_id_and_lock(dimension_slice_id, &tuplock, CurrentMemoryContext); + + if (!slice) + ereport(ERROR, errmsg("could not find slice with id %d", dimension_slice_id)); + overlap = ts_osm_chunk_range_overlaps(dimension_slice_id, + slice->fd.dimension_id, + range_start_internal, + range_end_internal); + /* + * It should not be possible for OSM chunks to overlap with the range + * managed by timescaledb. OSM extension should update the range of the + * OSM chunk to [INT64_MAX -1, infinity) when it detects that it is + * noncontiguous, so we should not end up detecting overlaps anyway. + * But throw an error in case we encounter this situation. 
+ */ + if (overlap) + ereport(ERROR, + errmsg("attempting to set overlapping range for tiered chunk of %s.%s", + NameStr(ht->fd.schema_name), + NameStr(ht->fd.table_name)), + errhint("Range should be set to invalid for tiered chunk")); + range_invalid = ts_osm_chunk_range_is_invalid(range_start_internal, range_end_internal); + /* Update the hypertable flags regarding the validity of the OSM range */ + if (range_invalid) + { + /* range is set to infinity so the OSM chunk is considered last */ + range_start_internal = PG_INT64_MAX - 1; + range_end_internal = PG_INT64_MAX; + if (!osm_chunk_empty) + ht->fd.status = + ts_set_flags_32(ht->fd.status, HYPERTABLE_STATUS_OSM_CHUNK_NONCONTIGUOUS); + else + ht->fd.status = + ts_clear_flags_32(ht->fd.status, HYPERTABLE_STATUS_OSM_CHUNK_NONCONTIGUOUS); + } + else + ht->fd.status = ts_clear_flags_32(ht->fd.status, HYPERTABLE_STATUS_OSM_CHUNK_NONCONTIGUOUS); + ts_hypertable_update(ht); + ts_cache_release(hcache); + + slice->fd.range_start = range_start_internal; + slice->fd.range_end = range_end_internal; + ts_dimension_slice_update_by_id(dimension_slice_id, &slice->fd); + + PG_RETURN_BOOL(overlap); +} diff --git a/src/nodes/chunk_dispatch/chunk_dispatch.c b/src/nodes/chunk_dispatch/chunk_dispatch.c index 395dd94b26a..1bdc93da855 100644 --- a/src/nodes/chunk_dispatch/chunk_dispatch.c +++ b/src/nodes/chunk_dispatch/chunk_dispatch.c @@ -23,6 +23,7 @@ #include "guc.h" #include "nodes/hypertable_modify.h" #include "ts_catalog/chunk_data_node.h" +#include "hypercube.h" static Node *chunk_dispatch_state_create(CustomScan *cscan); @@ -105,6 +106,32 @@ ts_chunk_dispatch_get_chunk_insert_state(ChunkDispatch *dispatch, Point *point, if (chunk && ts_chunk_is_frozen(chunk)) elog(ERROR, "cannot INSERT into frozen chunk \"%s\"", get_rel_name(chunk->table_id)); #endif + if (chunk && IS_OSM_CHUNK(chunk)) + { + const Dimension *time_dim = + hyperspace_get_open_dimension(dispatch->hypertable->space, 0); + Assert(time_dim != NULL); + + Oid outfuncid 
= InvalidOid; + bool isvarlena; + getTypeOutputInfo(time_dim->fd.column_type, &outfuncid, &isvarlena); + Assert(!isvarlena); + Datum start_ts = ts_internal_to_time_value(chunk->cube->slices[0]->fd.range_start, + time_dim->fd.column_type); + Datum end_ts = ts_internal_to_time_value(chunk->cube->slices[0]->fd.range_end, + time_dim->fd.column_type); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Cannot insert into tiered chunk range of %s.%s - attempt to create " + "new chunk " + "with range [%s %s] failed", + NameStr(dispatch->hypertable->fd.schema_name), + NameStr(dispatch->hypertable->fd.table_name), + DatumGetCString(OidFunctionCall1(outfuncid, start_ts)), + DatumGetCString(OidFunctionCall1(outfuncid, end_ts))), + errhint( + "Hypertable has tiered data with time range that overlaps the insert"))); + } if (!chunk) { diff --git a/src/planner/planner.c b/src/planner/planner.c index e8bfaaf39a5..69d918515d0 100644 --- a/src/planner/planner.c +++ b/src/planner/planner.c @@ -911,11 +911,15 @@ should_chunk_append(Hypertable *ht, PlannerInfo *root, RelOptInfo *rel, Path *pa if (!ordered || path->pathkeys == NIL || list_length(merge->subpaths) == 0) return false; - /* cannot support ordered append with OSM chunks. 
OSM chunk - * ranges are not recorded with the catalog + /* + * Do not try to do ordered append if the OSM chunk range is noncontiguous */ if (ht && ts_chunk_get_osm_chunk_id(ht->fd.id) != INVALID_CHUNK_ID) - return false; + { + if (ts_flags_are_set_32(ht->fd.status, + HYPERTABLE_STATUS_OSM_CHUNK_NONCONTIGUOUS)) + return false; + } /* * If we only have 1 child node there is no need for the diff --git a/src/ts_catalog/catalog.h b/src/ts_catalog/catalog.h index a4b4b020bec..48f7e91d288 100644 --- a/src/ts_catalog/catalog.h +++ b/src/ts_catalog/catalog.h @@ -119,6 +119,7 @@ enum Anum_hypertable Anum_hypertable_compression_state, Anum_hypertable_compressed_hypertable_id, Anum_hypertable_replication_factor, + Anum_hypertable_status, _Anum_hypertable_max, }; @@ -138,6 +139,7 @@ typedef struct FormData_hypertable int16 compression_state; int32 compressed_hypertable_id; int16 replication_factor; + int32 status; } FormData_hypertable; typedef FormData_hypertable *Form_hypertable; @@ -1484,6 +1486,27 @@ typedef struct FormData_job_error typedef FormData_job_error *Form_job_error; +#define HYPERTABLE_STATUS_DEFAULT 0 +/* flag set when hypertable has an attached OSM chunk */ +#define HYPERTABLE_STATUS_OSM 1 +/* + * Currently, the time slice range metadata is updated in + * the timescaledb catalog with the min and max of the range managed by OSM. + * However, this range has to be contiguous in order to + * update our catalog with its min and max value. If it is not contiguous, + * then we cannot store the min and max in our catalog because tuple routing + * will not work properly with gaps in the range. + * When attempting to insert into one of the gaps, which do not in fact contain + * tiered data, we error out because this is perceived as an attempt to insert + * into tiered chunks, which are immutable. + * When the range is noncontiguous, we store [INT64_MAX - 1, INT64_MAX) and set + * this flag. + * This flag also serves to allow or block the ordered append optimization. 
When + * the range covered by OSM is contiguous, then it is possible to do ordered + * append. + */ +#define HYPERTABLE_STATUS_OSM_CHUNK_NONCONTIGUOUS 2 + extern void ts_catalog_table_info_init(CatalogTableInfo *tables, int max_table, const TableInfoDef *table_ary, const TableIndexDef *index_ary, const char **serial_id_ary); diff --git a/test/expected/alter.out b/test/expected/alter.out index 3c711c6ae2d..532cb07d2ba 100644 --- a/test/expected/alter.out +++ b/test/expected/alter.out @@ -675,9 +675,9 @@ ALTER SCHEMA my_associated_schema RENAME TO new_associated_schema; INSERT INTO my_table (date, quantity) VALUES ('2018-08-10T23:00:00+00:00', 20); -- Make sure the schema name is changed in both catalog tables SELECT * from _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 12 | public | my_table | new_associated_schema | _hyper_12 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 12 | public | my_table | new_associated_schema | _hyper_12 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 
| 0 | | | 0 (1 row) SELECT * from _timescaledb_catalog.chunk; diff --git a/test/expected/alternate_users.out b/test/expected/alternate_users.out index 7f76da07bac..cbabf5911a7 100644 --- a/test/expected/alternate_users.out +++ b/test/expected/alternate_users.out @@ -135,12 +135,12 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1 (1 row) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | one_Partition | one_Partition | _hyper_1 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | one_Partition | one_Partition | _hyper_1 | 1 | 
_timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (4 rows) CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c"); diff --git a/test/expected/create_hypertable.out b/test/expected/create_hypertable.out index 2d6d1081aa4..fe463f51e18 100644 --- a/test/expected/create_hypertable.out +++ b/test/expected/create_hypertable.out @@ -86,9 +86,9 @@ select add_dimension('test_schema.test_table', 'location', 4); (1 row) select * from _timescaledb_catalog.hypertable where table_name = 'test_table'; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 2 | test_schema | test_table | chunk_schema | _hyper_2 | 3 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status 
+----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 2 | test_schema | test_table | chunk_schema | _hyper_2 | 3 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) select * from _timescaledb_catalog.dimension; @@ -149,9 +149,9 @@ NOTICE: adding not-null constraint to column "id" (1 row) select * from _timescaledb_catalog.hypertable where table_name = 'test_table'; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 2 | test_schema | test_table | chunk_schema | _hyper_2 | 4 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 2 | test_schema | test_table | chunk_schema | _hyper_2 | 4 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) select * from _timescaledb_catalog.dimension; @@ -541,9 +541,9 @@ NOTICE: migrating data to chunks --there should be two new chunks select * from 
_timescaledb_catalog.hypertable where table_name = 'test_migrate'; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 10 | test_schema | test_migrate | _timescaledb_internal | _hyper_10 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 10 | test_schema | test_migrate | _timescaledb_internal | _hyper_10 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) select * from _timescaledb_catalog.chunk; diff --git a/test/expected/ddl-13.out b/test/expected/ddl-13.out index ba33a2b5bec..bfd3b0bb527 100644 --- a/test/expected/ddl-13.out +++ b/test/expected/ddl-13.out @@ -43,10 +43,10 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1 (1 row) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor 
-----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (2 rows) CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c"); diff --git a/test/expected/ddl-14.out b/test/expected/ddl-14.out index 85aa8ab5297..a61e572e08f 100644 --- a/test/expected/ddl-14.out +++ b/test/expected/ddl-14.out @@ -43,10 +43,10 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1 (1 row) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor 
-----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (2 rows) CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c"); diff --git a/test/expected/ddl-15.out b/test/expected/ddl-15.out index 85aa8ab5297..a61e572e08f 100644 --- a/test/expected/ddl-15.out +++ b/test/expected/ddl-15.out @@ -43,10 +43,10 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1 (1 row) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor 
-----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (2 rows) CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c"); diff --git a/test/expected/drop_extension.out b/test/expected/drop_extension.out index a6738fedd0d..7e9f728fe07 100644 --- a/test/expected/drop_extension.out +++ b/test/expected/drop_extension.out @@ -11,9 +11,9 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor 
-----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) INSERT INTO drop_test VALUES('Mon Mar 20 09:17:00.936242 2017', 23.4, 'dev1'); @@ -57,9 +57,9 @@ WARNING: column type "timestamp without time zone" used for "time" does not fol (1 row) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | 
chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) INSERT INTO drop_test VALUES('Mon Mar 20 09:18:19.100462 2017', 22.1, 'dev1'); diff --git a/test/expected/drop_hypertable.out b/test/expected/drop_hypertable.out index 4833780b975..0ca624f1783 100644 --- a/test/expected/drop_hypertable.out +++ b/test/expected/drop_hypertable.out @@ -2,8 +2,8 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. SELECT * from _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+-------------------- + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+--------------------+-------- (0 rows) SELECT * from _timescaledb_catalog.dimension; 
@@ -76,9 +76,9 @@ NOTICE: table "should_drop" is already a hypertable, skipping (1 row) SELECT * from _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | should_drop | _timescaledb_internal | _hyper_1 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | should_drop | _timescaledb_internal | _hyper_1 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) SELECT * from _timescaledb_catalog.dimension; @@ -99,9 +99,9 @@ NOTICE: adding not-null constraint to column "time" INSERT INTO should_drop VALUES (now(), 1.0); SELECT * from _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor 
-----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 4 | public | should_drop | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 4 | public | should_drop | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) SELECT * from _timescaledb_catalog.dimension; diff --git a/test/expected/drop_owned.out b/test/expected/drop_owned.out index bfcd4b96e14..238784ee7ae 100644 --- a/test/expected/drop_owned.out +++ b/test/expected/drop_owned.out @@ -25,10 +25,10 @@ NOTICE: adding not-null constraint to column "time" INSERT INTO hypertable_schema.superuser VALUES ('2001-01-01 01:01:01', 23.3, 1); SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------------+-------------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | hypertable_schema | default_perm_user | 
_timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------------+-------------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | hypertable_schema | default_perm_user | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (2 rows) SELECT * FROM _timescaledb_catalog.chunk; @@ -40,9 +40,9 @@ SELECT * FROM _timescaledb_catalog.chunk; DROP OWNED BY :ROLE_DEFAULT_PERM_USER; SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | 
chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) SELECT * FROM _timescaledb_catalog.chunk; @@ -54,8 +54,8 @@ SELECT * FROM _timescaledb_catalog.chunk; DROP TABLE hypertable_schema.superuser; --everything should be cleaned up SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+-------------------- + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+--------------------+-------- (0 rows) SELECT * FROM _timescaledb_catalog.chunk; diff --git a/test/expected/drop_rename_hypertable.out b/test/expected/drop_rename_hypertable.out index 4a26a2aa4dd..0ba03bd6609 100644 --- 
a/test/expected/drop_rename_hypertable.out +++ b/test/expected/drop_rename_hypertable.out @@ -146,9 +146,9 @@ SELECT * FROM "newname"; (12 rows) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) \c :TEST_DBNAME :ROLE_SUPERUSER @@ -173,15 +173,15 @@ SELECT * FROM "newschema"."newname"; (12 rows) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor 
-----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | newschema | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | newschema | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) DROP TABLE "newschema"."newname"; SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+-------------------- + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status 
+----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+--------------------+-------- (0 rows) \dt "public".* diff --git a/test/expected/drop_schema.out b/test/expected/drop_schema.out index 046abd4eddb..b12375cd041 100644 --- a/test/expected/drop_schema.out +++ b/test/expected/drop_schema.out @@ -30,10 +30,10 @@ NOTICE: adding not-null constraint to column "time" INSERT INTO hypertable_schema.test1 VALUES ('2001-01-01 01:01:01', 23.3, 1); INSERT INTO hypertable_schema.test2 VALUES ('2001-01-01 01:01:01', 23.3, 1); SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | hypertable_schema | test1 | chunk_schema1 | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | 
hypertable_schema | test1 | chunk_schema1 | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (2 rows) SELECT * FROM _timescaledb_catalog.chunk; @@ -53,10 +53,10 @@ SET ROLE :ROLE_DEFAULT_PERM_USER; --show that the metadata for the table using the dropped schema is --changed. The other table is not affected. SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | hypertable_schema | test1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | hypertable_schema | test1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | 
_timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (2 rows) SELECT * FROM _timescaledb_catalog.chunk; @@ -86,8 +86,8 @@ NOTICE: drop cascades to 4 other objects SET ROLE :ROLE_DEFAULT_PERM_USER; --everything should be cleaned up SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+-------------------- + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+--------------------+-------- (0 rows) SELECT * FROM _timescaledb_catalog.chunk; diff --git a/test/expected/dump_meta.out b/test/expected/dump_meta.out index 7086798f41c..e359fa6ac3c 100644 --- a/test/expected/dump_meta.out +++ b/test/expected/dump_meta.out @@ -66,9 +66,9 @@ List of tables \echo 'List of hypertables' List of hypertables SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor 
-----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) \echo 'List of chunk indexes' diff --git a/test/expected/pg_dump.out b/test/expected/pg_dump.out index a862ab2db83..a4743238458 100644 --- a/test/expected/pg_dump.out +++ b/test/expected/pg_dump.out @@ -77,9 +77,9 @@ WARNING: target chunk size for adaptive chunking is less than 10 MB -- Chunk sizing func set SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 
1048576 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 1048576 | 0 | | | 0 (1 row) SELECT proname, pronamespace, pronargs @@ -518,9 +518,9 @@ SELECT * FROM _timescaledb_catalog.chunk_constraint; --Chunk sizing function should have been restored SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 1048576 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status 
+----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 1048576 | 0 | | | 0 (1 row) SELECT proname, pronamespace, pronargs diff --git a/test/expected/relocate_extension.out b/test/expected/relocate_extension.out index c7555db5764..a25d73ed32b 100644 --- a/test/expected/relocate_extension.out +++ b/test/expected/relocate_extension.out @@ -38,11 +38,11 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | test_ts | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 2 | public | test_tz | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | - 3 | public | test_dt | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status 
+----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | test_ts | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 2 | public | test_tz | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 + 3 | public | test_dt | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (3 rows) INSERT INTO test_ts VALUES('Mon Mar 20 09:17:00.936242 2017', 23.4, 'dev1'); diff --git a/test/expected/truncate.out b/test/expected/truncate.out index 6c44ccd2fb6..ac227ef84c7 100644 --- a/test/expected/truncate.out +++ b/test/expected/truncate.out @@ -35,9 +35,9 @@ INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES \set QUIET on \o SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status 
+----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) SELECT * FROM _timescaledb_catalog.chunk; @@ -78,9 +78,9 @@ SELECT * FROM "two_Partitions"; SET client_min_messages = WARNING; TRUNCATE "two_Partitions"; SELECT * FROM _timescaledb_catalog.hypertable; - id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor -----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------------------- - 1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | replication_factor | status +----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+--------------------+-------- + 1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | | 0 (1 row) SELECT * FROM _timescaledb_catalog.chunk; diff --git a/tsl/test/expected/chunk_utils_internal.out 
b/tsl/test/expected/chunk_utils_internal.out index 10beb185340..44a2eb9829c 100644 --- a/tsl/test/expected/chunk_utils_internal.out +++ b/tsl/test/expected/chunk_utils_internal.out @@ -7,6 +7,7 @@ -- * freeze_chunk -- * drop_chunk -- * attach_foreign_table_chunk +-- * hypertable_osm_range_update CREATE OR REPLACE VIEW chunk_view AS SELECT ht.table_name AS hypertable_name, @@ -514,12 +515,26 @@ SELECT * FROM child_fdw_table; Wed Jan 01 01:00:00 2020 PST | 100 | 1000 (1 row) +-- error should be thrown as the hypertable does not yet have an associated tiered chunk +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try','2020-01-01 01:00'::timestamptz, '2020-01-01 03:00'); +ERROR: no OSM chunk found for hypertable public.ht_try +\set ON_ERROR_STOP 1 SELECT _timescaledb_functions.attach_osm_table_chunk('ht_try', 'child_fdw_table'); attach_osm_table_chunk ------------------------ t (1 row) +-- must also update the range since the created chunk is assumed to be empty, +-- and its range actually updated when data is moved to OSM. 
But in this mock +-- test case, the attached OSM chunk contains data +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2020-01-01'::timestamptz, '2020-01-02'); + hypertable_osm_range_update +----------------------------- + f +(1 row) + -- OSM chunk is not visible in chunks view SELECT chunk_name, range_start, range_end FROM timescaledb_information.chunks @@ -533,10 +548,10 @@ SELECT chunk_name, range_start, range_end FROM chunk_view WHERE hypertable_name = 'ht_try' ORDER BY chunk_name; - chunk_name | range_start | range_end --------------------+---------------------------------------+------------------------------ - _hyper_5_10_chunk | Wed May 04 17:00:00 2022 PDT | Thu May 05 17:00:00 2022 PDT - child_fdw_table | Sat Jan 09 20:00:54.775806 294247 PST | infinity + chunk_name | range_start | range_end +-------------------+------------------------------+------------------------------ + _hyper_5_10_chunk | Wed May 04 17:00:00 2022 PDT | Thu May 05 17:00:00 2022 PDT + child_fdw_table | Wed Jan 01 00:00:00 2020 PST | Thu Jan 02 00:00:00 2020 PST (2 rows) SELECT * FROM ht_try ORDER BY 1; @@ -596,6 +611,97 @@ SELECT * from ht_try WHERE timec > '2020-01-01 01:00' ORDER BY 1; Thu May 05 01:00:00 2022 PDT | 222 | 222 (1 row) +-- test ordered append +BEGIN; +-- before updating the ranges +EXPLAIN SELECT * FROM ht_try ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on ht_try (cost=100.00..202.29 rows=3276 width=24) + Order: ht_try.timec + -> Foreign Scan on child_fdw_table (cost=100.00..166.59 rows=1706 width=24) + -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk (cost=0.15..35.70 rows=1570 width=24) +(4 rows) + +-- range before update +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, 
_timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.table_name = 'child_fdw_table' AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end +----------+-----------------+--------+-----------+--------------------+------------------+------------------ + 11 | child_fdw_table | 0 | t | 10 | 1577865600000000 | 1577952000000000 +(1 row) + +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2020-01-01 01:00'::timestamptz, '2020-01-02'); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +SELECT id, schema_name, table_name, status FROM _timescaledb_catalog.hypertable WHERE table_name = 'ht_try'; + id | schema_name | table_name | status +----+-------------+------------+-------- + 5 | public | ht_try | 1 +(1 row) + +-- verify range was updated +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.table_name = 'child_fdw_table' AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end +----------+-----------------+--------+-----------+--------------------+------------------+------------------ + 11 | child_fdw_table | 0 | t | 10 | 1577869200000000 | 1577952000000000 +(1 row) + +-- should be ordered append now +EXPLAIN SELECT * FROM ht_try ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on ht_try (cost=100.00..202.29 rows=3276 width=24) + Order: ht_try.timec + -> Foreign Scan on child_fdw_table (cost=100.00..166.59 rows=1706 width=24) + -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on 
_hyper_5_10_chunk (cost=0.15..35.70 rows=1570 width=24) +(4 rows) + +SELECT * FROM ht_try ORDER BY 1; + timec | acq_id | value +------------------------------+--------+------- + Wed Jan 01 01:00:00 2020 PST | 100 | 1000 + Thu May 05 01:00:00 2022 PDT | 222 | 222 +(2 rows) + +-- test invalid range - should not be ordered append +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try'); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +EXPLAIN SELECT * from ht_try ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Merge Append (cost=100.16..235.06 rows=3276 width=24) + Sort Key: _hyper_5_10_chunk.timec + -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk (cost=0.15..35.70 rows=1570 width=24) + -> Foreign Scan on child_fdw_table (cost=100.00..166.59 rows=1706 width=24) +(4 rows) + +SELECT * from ht_try ORDER BY 1; + timec | acq_id | value +------------------------------+--------+------- + Wed Jan 01 01:00:00 2020 PST | 100 | 1000 + Thu May 05 01:00:00 2022 PDT | 222 | 222 +(2 rows) + +ROLLBACK; +\set ON_ERROR_STOP 0 +-- test that error is produced when range_start < range_end +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2020-01-02 01:00'::timestamptz, '2020-01-02 00:00'); +ERROR: dimension slice range_end cannot be less than range_start +-- error when range overlaps +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2022-05-05 01:00'::timestamptz, '2022-05-06'); +ERROR: attempting to set overlapping range for tiered chunk of public.ht_try +\set ON_ERROR_STOP 1 --TEST GUC variable to enable/disable OSM chunk SET timescaledb.enable_tiered_reads=false; EXPLAIN (COSTS OFF) SELECT * from ht_try; @@ -761,6 +867,13 @@ SELECT _timescaledb_functions.attach_osm_table_chunk('hyper_constr', 'child_hype t (1 row) +-- was attached with data, so must update the 
range +SELECT _timescaledb_functions.hypertable_osm_range_update('hyper_constr', 100, 110); + hypertable_osm_range_update +----------------------------- + f +(1 row) + SELECT table_name, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE hypertable_id IN (SELECT id from _timescaledb_catalog.hypertable @@ -787,26 +900,6 @@ where conrelid = 'child_hyper_constr'::regclass ORDER BY 1; hyper_constr_temp_check (1 row) ---TEST policy is not applied on OSM chunk -CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 500::bigint' ; -SELECT set_integer_now_func('hyper_constr', 'dummy_now_smallint'); - set_integer_now_func ----------------------- - -(1 row) - -SELECT add_retention_policy('hyper_constr', 100::int) AS deljob_id \gset -CALL run_job(:deljob_id); -CALL run_job(:deljob_id); -SELECT chunk_name, range_start, range_end -FROM chunk_view -WHERE hypertable_name = 'hyper_constr' -ORDER BY chunk_name; - chunk_name | range_start | range_end ---------------------+---------------------------------------+----------- - child_hyper_constr | Sat Jan 09 20:00:54.775806 294247 PST | infinity -(1 row) - ----- TESTS for copy into frozen chunk ------------ \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER CREATE TABLE test1.copy_test ( @@ -910,6 +1003,26 @@ SELECT indexname, tablename FROM pg_indexes WHERE indexname = 'hyper_constr_mid_ (1 row) DROP INDEX hyper_constr_mid_idx; +--TEST policy is applied on OSM chunk +-- XXX this is to be updated once the hook for dropping chunks is added +CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 500::bigint' ; +SELECT set_integer_now_func('hyper_constr', 'dummy_now_smallint'); + set_integer_now_func +---------------------- + +(1 row) + +SELECT add_retention_policy('hyper_constr', 100::int) AS deljob_id \gset +CALL run_job(:deljob_id); +CALL run_job(:deljob_id); +SELECT chunk_name, range_start, range_end +FROM chunk_view +WHERE hypertable_name = 'hyper_constr' 
+ORDER BY chunk_name; + chunk_name | range_start | range_end +------------+-------------+----------- +(0 rows) + -- test range of dimension slice for osm chunk for different datatypes CREATE TABLE osm_int2(time int2 NOT NULL); CREATE TABLE osm_int4(time int4 NOT NULL); @@ -987,6 +1100,236 @@ ORDER BY 2,3; osm_tstz | 22 | 14 | 9223372036854775806 | 9223372036854775807 (6 rows) +-- test that correct slice is found and updated for table with multiple chunk constraints +CREATE TABLE test_multicon(time timestamptz not null unique, a int); +SELECT hypertable_id as htid FROM create_hypertable('test_multicon', 'time', chunk_time_interval => interval '1 day') \gset +insert into test_multicon values ('2020-01-02 01:00'::timestamptz, 1); +SELECT * FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc WHERE c.hypertable_id = :htid +AND c.id = cc.chunk_id; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk | chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------+----------+--------------------+-----------------------------+---------------------------- + 23 | 15 | _timescaledb_internal | _hyper_15_23_chunk | | f | 0 | f | 23 | | 23_3_test_multicon_time_key | test_multicon_time_key + 23 | 15 | _timescaledb_internal | _hyper_15_23_chunk | | f | 0 | f | 23 | 23 | constraint_23 | +(2 rows) + +\c :TEST_DBNAME :ROLE_SUPERUSER ; +UPDATE _timescaledb_catalog.chunk SET osm_chunk = true WHERE hypertable_id = :htid; +\c :TEST_DBNAME :ROLE_4; +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', '2020-01-02 01:00'::timestamptz, '2020-01-04 01:00'); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +-- view udpated range +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM 
_timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end +----------+--------------------+--------+-----------+--------------------+------------------+------------------ + 23 | _hyper_15_23_chunk | 0 | t | 23 | 1577955600000000 | 1578128400000000 +(1 row) + +-- check that range was reset to default - infinity +\set ON_ERROR_STOP 0 +-- both range_start and range_end must be NULL, or non-NULL +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', NULL, '2020-01-04 01:00'::timestamptz); +ERROR: range_start and range_end parameters must be both NULL or both non-NULL +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', NULL, NULL); +ERROR: could not determine polymorphic type because input has type unknown +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon'); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +\set ON_ERROR_STOP 1 +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', NULL::timestamptz, NULL); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end +----------+--------------------+--------+-----------+--------------------+---------------------+--------------------- + 23 | _hyper_15_23_chunk | 0 | t | 23 | 9223372036854775806 | 9223372036854775807 +(1 row) + +-- test further with ordered 
append +\c postgres_fdw_db :ROLE_4; +CREATE TABLE test_chunkapp_fdw (time timestamptz NOT NULL, a int); +INSERT INTO test_chunkapp_fdw (time, a) VALUES ('2020-01-03 02:00'::timestamptz, 3); +\c :TEST_DBNAME :ROLE_4 +CREATE TABLE test_chunkapp(time timestamptz NOT NULL, a int); +SELECT hypertable_id as htid FROM create_hypertable('test_chunkapp', 'time', chunk_time_interval => interval '1day') \gset +INSERT INTO test_chunkapp (time, a) VALUES ('2020-01-01 01:00'::timestamptz, 1), ('2020-01-02 01:00'::timestamptz, 2); +CREATE FOREIGN TABLE test_chunkapp_fdw_child(time timestamptz NOT NULL, a int) SERVER s3_server OPTIONS (schema_name 'public', table_name 'test_chunkapp_fdw');; +SELECT _timescaledb_functions.attach_osm_table_chunk('test_chunkapp','test_chunkapp_fdw_child'); + attach_osm_table_chunk +------------------------ + t +(1 row) + +-- view range before update +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end +----------+-------------------------+--------+-----------+--------------------+---------------------+--------------------- + 24 | _hyper_16_24_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 + 25 | _hyper_16_25_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 + 26 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807 +(3 rows) + +-- attempt to update overlapping range, should fail +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', '2020-01-02 01:00'::timestamptz, '2020-01-04 01:00'); +ERROR: attempting to set overlapping range for tiered chunk of public.test_chunkapp +\set ON_ERROR_STOP 1 +-- update actual range 
of OSM chunk, should work +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', '2020-01-03 00:00'::timestamptz, '2020-01-04 00:00'); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +-- view udpated range +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end +----------+-------------------------+--------+-----------+--------------------+------------------+------------------ + 24 | _hyper_16_24_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 + 25 | _hyper_16_25_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 + 26 | test_chunkapp_fdw_child | 0 | t | 26 | 1578038400000000 | 1578124800000000 +(3 rows) + +-- ordered append should be possible as ranges do not overlap +EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_chunkapp (cost=0.15..270.31 rows=6355 width=12) + Order: test_chunkapp."time" + -> Index Scan Backward using _hyper_16_24_chunk_test_chunkapp_time_idx on _hyper_16_24_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_16_25_chunk_test_chunkapp_time_idx on _hyper_16_25_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) +(5 rows) + +SELECT * FROM test_chunkapp ORDER BY 1; + time | a +------------------------------+--- + Wed Jan 01 01:00:00 2020 PST | 1 + Thu Jan 02 01:00:00 2020 PST | 2 + Fri Jan 03 02:00:00 2020 PST | 3 +(3 rows) + +-- but, insert should 
not be possible +SELECT ts_setup_osm_hook(); + ts_setup_osm_hook +------------------- + +(1 row) + +\set ON_ERROR_STOP 0 +INSERT INTO test_chunkapp VALUES ('2020-01-03 02:00'::timestamptz, 3); +ERROR: Cannot insert into tiered chunk range of public.test_chunkapp - attempt to create new chunk with range [Fri Jan 03 00:00:00 2020 PST Sat Jan 04 00:00:00 2020 PST] failed +\set ON_ERROR_STOP 1 +SELECT ts_undo_osm_hook(); + ts_undo_osm_hook +------------------ + +(1 row) + +-- reset range to infinity +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp',empty:=false); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +-- ordered append not possible because range is invalid and empty was not specified +EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Merge Append (cost=100.33..352.47 rows=6355 width=12) + Sort Key: _hyper_16_24_chunk."time" + -> Index Scan Backward using _hyper_16_24_chunk_test_chunkapp_time_idx on _hyper_16_24_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_16_25_chunk_test_chunkapp_time_idx on _hyper_16_25_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) +(5 rows) + +SELECT * FROM test_chunkapp ORDER BY 1; + time | a +------------------------------+--- + Wed Jan 01 01:00:00 2020 PST | 1 + Thu Jan 02 01:00:00 2020 PST | 2 + Fri Jan 03 02:00:00 2020 PST | 3 +(3 rows) + +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + chunk_id | table_name | status | osm_chunk | dimension_slice_id | 
range_start | range_end +----------+-------------------------+--------+-----------+--------------------+---------------------+--------------------- + 24 | _hyper_16_24_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 + 25 | _hyper_16_25_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 + 26 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807 +(3 rows) + +-- now set empty to true, should ordered append +\c postgres_fdw_db :ROLE_4; +DELETE FROM test_chunkapp_fdw; +\c :TEST_DBNAME :ROLE_4; +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', NULL::timestamptz, NULL, empty => true); + hypertable_osm_range_update +----------------------------- + f +(1 row) + +EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_chunkapp (cost=0.15..270.31 rows=6355 width=12) + Order: test_chunkapp."time" + -> Index Scan Backward using _hyper_16_24_chunk_test_chunkapp_time_idx on _hyper_16_24_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_16_25_chunk_test_chunkapp_time_idx on _hyper_16_25_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) +(5 rows) + +SELECT * FROM test_chunkapp ORDER BY 1; + time | a +------------------------------+--- + Wed Jan 01 01:00:00 2020 PST | 1 + Thu Jan 02 01:00:00 2020 PST | 2 +(2 rows) + +-- test error is triggered when time dimension not found +CREATE TABLE test2(time timestamptz not null, a int); +SELECT create_hypertable('test2', 'time'); + create_hypertable +--------------------- + (17,public,test2,t) +(1 row) + +INSERT INTO test2 VALUES ('2020-01-01'::timestamptz, 1); +ALTER TABLE test2 SET (timescaledb.compress); +SELECT compress_chunk(show_chunks('test2')); + compress_chunk 
+------------------------------------------ + _timescaledb_internal._hyper_17_27_chunk +(1 row) + +-- find internal compression table, call API function on it +SELECT format('%I.%I', cht.schema_name, cht.table_name) AS "COMPRESSION_TBLNM" +FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht +WHERE ht.table_name = 'test2' and cht.id = ht.compressed_hypertable_id \gset +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.hypertable_osm_range_update(:'COMPRESSION_TBLNM'::regclass, '2020-01-01'::timestamptz); +ERROR: could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_18 +\set ON_ERROR_STOP 1 -- clean up databases created \c :TEST_DBNAME :ROLE_SUPERUSER DROP DATABASE postgres_fdw_db; diff --git a/tsl/test/shared/expected/extension.out b/tsl/test/shared/expected/extension.out index dff45519ccd..23d851e5f5d 100644 --- a/tsl/test/shared/expected/extension.out +++ b/tsl/test/shared/expected/extension.out @@ -91,6 +91,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text _timescaledb_functions.hypertable_constraint_add_table_fk_constraint(name,name,name,integer) _timescaledb_functions.hypertable_invalidation_log_delete(integer) _timescaledb_functions.hypertable_local_size(name,name) + _timescaledb_functions.hypertable_osm_range_update(regclass,anyelement,anyelement,boolean) _timescaledb_functions.hypertable_remote_size(name,name) _timescaledb_functions.indexes_local_size(name,name) _timescaledb_functions.indexes_remote_size(name,name,name) diff --git a/tsl/test/sql/chunk_utils_internal.sql b/tsl/test/sql/chunk_utils_internal.sql index 3ed1736b394..92c3178f834 100644 --- a/tsl/test/sql/chunk_utils_internal.sql +++ b/tsl/test/sql/chunk_utils_internal.sql @@ -8,6 +8,7 @@ -- * freeze_chunk -- * drop_chunk -- * attach_foreign_table_chunk +-- * hypertable_osm_range_update CREATE OR REPLACE VIEW chunk_view AS SELECT @@ -325,7 +326,16 @@ INSERT INTO ht_try VALUES ('2022-05-05 01:00', 222, 
222); SELECT * FROM child_fdw_table; +-- error should be thrown as the hypertable does not yet have an associated tiered chunk +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try','2020-01-01 01:00'::timestamptz, '2020-01-01 03:00'); +\set ON_ERROR_STOP 1 + SELECT _timescaledb_functions.attach_osm_table_chunk('ht_try', 'child_fdw_table'); +-- must also update the range since the created chunk is assumed to be empty, +-- and its range actually updated when data is moved to OSM. But in this mock +-- test case, the attached OSM chunk contains data +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2020-01-01'::timestamptz, '2020-01-02'); -- OSM chunk is not visible in chunks view SELECT chunk_name, range_start, range_end @@ -355,6 +365,37 @@ SELECT * from ht_try WHERE timec > '2000-01-01 01:00' and timec < '2022-01-01 0 SELECT * from ht_try WHERE timec > '2020-01-01 01:00' ORDER BY 1; +-- test ordered append +BEGIN; +-- before updating the ranges +EXPLAIN SELECT * FROM ht_try ORDER BY 1; +-- range before update +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.table_name = 'child_fdw_table' AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2020-01-01 01:00'::timestamptz, '2020-01-02'); +SELECT id, schema_name, table_name, status FROM _timescaledb_catalog.hypertable WHERE table_name = 'ht_try'; +-- verify range was updated +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.table_name = 'child_fdw_table' AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; +-- 
should be ordered append now +EXPLAIN SELECT * FROM ht_try ORDER BY 1; +SELECT * FROM ht_try ORDER BY 1; +-- test invalid range - should not be ordered append +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try'); +EXPLAIN SELECT * from ht_try ORDER BY 1; +SELECT * from ht_try ORDER BY 1; +ROLLBACK; + +\set ON_ERROR_STOP 0 +-- test that error is produced when range_start < range_end +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2020-01-02 01:00'::timestamptz, '2020-01-02 00:00'); +-- error when range overlaps +SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try', '2022-05-05 01:00'::timestamptz, '2022-05-06'); +\set ON_ERROR_STOP 1 + --TEST GUC variable to enable/disable OSM chunk SET timescaledb.enable_tiered_reads=false; EXPLAIN (COSTS OFF) SELECT * from ht_try; @@ -452,6 +493,8 @@ CREATE FOREIGN TABLE child_hyper_constr --check constraints are automatically added for the foreign table SELECT _timescaledb_functions.attach_osm_table_chunk('hyper_constr', 'child_hyper_constr'); +-- was attached with data, so must update the range +SELECT _timescaledb_functions.hypertable_osm_range_update('hyper_constr', 100, 110); SELECT table_name, status, osm_chunk FROM _timescaledb_catalog.chunk @@ -465,19 +508,6 @@ SELECT * FROM hyper_constr order by time; SELECT conname FROM pg_constraint where conrelid = 'child_hyper_constr'::regclass ORDER BY 1; ---TEST policy is not applied on OSM chunk -CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 500::bigint' ; - -SELECT set_integer_now_func('hyper_constr', 'dummy_now_smallint'); -SELECT add_retention_policy('hyper_constr', 100::int) AS deljob_id \gset - -CALL run_job(:deljob_id); -CALL run_job(:deljob_id); -SELECT chunk_name, range_start, range_end -FROM chunk_view -WHERE hypertable_name = 'hyper_constr' -ORDER BY chunk_name; - ----- TESTS for copy into frozen chunk ------------ \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER CREATE TABLE 
test1.copy_test ( @@ -555,6 +585,20 @@ CREATE INDEX hyper_constr_mid_idx ON hyper_constr(mid, time) WITH (timescaledb.t SELECT indexname, tablename FROM pg_indexes WHERE indexname = 'hyper_constr_mid_idx'; DROP INDEX hyper_constr_mid_idx; +--TEST policy is applied on OSM chunk +-- XXX this is to be updated once the hook for dropping chunks is added +CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 500::bigint' ; + +SELECT set_integer_now_func('hyper_constr', 'dummy_now_smallint'); +SELECT add_retention_policy('hyper_constr', 100::int) AS deljob_id \gset + +CALL run_job(:deljob_id); +CALL run_job(:deljob_id); +SELECT chunk_name, range_start, range_end +FROM chunk_view +WHERE hypertable_name = 'hyper_constr' +ORDER BY chunk_name; + -- test range of dimension slice for osm chunk for different datatypes CREATE TABLE osm_int2(time int2 NOT NULL); CREATE TABLE osm_int4(time int4 NOT NULL); @@ -585,7 +629,96 @@ INNER JOIN _timescaledb_catalog.dimension d ON d.id=ds.dimension_id INNER JOIN _timescaledb_catalog.hypertable ht on ht.id=d.hypertable_id WHERE ht.table_name LIKE 'osm%' ORDER BY 2,3; +-- test that correct slice is found and updated for table with multiple chunk constraints +CREATE TABLE test_multicon(time timestamptz not null unique, a int); +SELECT hypertable_id as htid FROM create_hypertable('test_multicon', 'time', chunk_time_interval => interval '1 day') \gset +insert into test_multicon values ('2020-01-02 01:00'::timestamptz, 1); +SELECT * FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc WHERE c.hypertable_id = :htid +AND c.id = cc.chunk_id; +\c :TEST_DBNAME :ROLE_SUPERUSER ; +UPDATE _timescaledb_catalog.chunk SET osm_chunk = true WHERE hypertable_id = :htid; +\c :TEST_DBNAME :ROLE_4; +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', '2020-01-02 01:00'::timestamptz, '2020-01-04 01:00'); +-- view udpated range +SELECT cc.chunk_id, c.table_name, c.status, 
c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; +-- check that range was reset to default - infinity +\set ON_ERROR_STOP 0 +-- both range_start and range_end must be NULL, or non-NULL +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', NULL, '2020-01-04 01:00'::timestamptz); +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', NULL, NULL); +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon'); +\set ON_ERROR_STOP 1 +SELECT _timescaledb_functions.hypertable_osm_range_update('test_multicon', NULL::timestamptz, NULL); +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; + +-- test further with ordered append +\c postgres_fdw_db :ROLE_4; +CREATE TABLE test_chunkapp_fdw (time timestamptz NOT NULL, a int); +INSERT INTO test_chunkapp_fdw (time, a) VALUES ('2020-01-03 02:00'::timestamptz, 3); +\c :TEST_DBNAME :ROLE_4 +CREATE TABLE test_chunkapp(time timestamptz NOT NULL, a int); +SELECT hypertable_id as htid FROM create_hypertable('test_chunkapp', 'time', chunk_time_interval => interval '1day') \gset +INSERT INTO test_chunkapp (time, a) VALUES ('2020-01-01 01:00'::timestamptz, 1), ('2020-01-02 01:00'::timestamptz, 2); + +CREATE FOREIGN TABLE test_chunkapp_fdw_child(time timestamptz NOT NULL, a int) SERVER s3_server OPTIONS (schema_name 'public', table_name 'test_chunkapp_fdw');; +SELECT _timescaledb_functions.attach_osm_table_chunk('test_chunkapp','test_chunkapp_fdw_child'); +-- view range before update +SELECT cc.chunk_id, 
c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; +-- attempt to update overlapping range, should fail +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', '2020-01-02 01:00'::timestamptz, '2020-01-04 01:00'); +\set ON_ERROR_STOP 1 +-- update actual range of OSM chunk, should work +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', '2020-01-03 00:00'::timestamptz, '2020-01-04 00:00'); +-- view udpated range +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; +-- ordered append should be possible as ranges do not overlap +EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; +SELECT * FROM test_chunkapp ORDER BY 1; +-- but, insert should not be possible +SELECT ts_setup_osm_hook(); +\set ON_ERROR_STOP 0 +INSERT INTO test_chunkapp VALUES ('2020-01-03 02:00'::timestamptz, 3); +\set ON_ERROR_STOP 1 +SELECT ts_undo_osm_hook(); +-- reset range to infinity +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp',empty:=false); +-- ordered append not possible because range is invalid and empty was not specified +EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; +SELECT * FROM test_chunkapp ORDER BY 1; +SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end +FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds +WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = 
cc.dimension_slice_id; +-- now set empty to true, should ordered append +\c postgres_fdw_db :ROLE_4; +DELETE FROM test_chunkapp_fdw; +\c :TEST_DBNAME :ROLE_4; +SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', NULL::timestamptz, NULL, empty => true); +EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; +SELECT * FROM test_chunkapp ORDER BY 1; + +-- test error is triggered when time dimension not found +CREATE TABLE test2(time timestamptz not null, a int); +SELECT create_hypertable('test2', 'time'); +INSERT INTO test2 VALUES ('2020-01-01'::timestamptz, 1); +ALTER TABLE test2 SET (timescaledb.compress); +SELECT compress_chunk(show_chunks('test2')); +-- find internal compression table, call API function on it +SELECT format('%I.%I', cht.schema_name, cht.table_name) AS "COMPRESSION_TBLNM" +FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht +WHERE ht.table_name = 'test2' and cht.id = ht.compressed_hypertable_id \gset +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.hypertable_osm_range_update(:'COMPRESSION_TBLNM'::regclass, '2020-01-01'::timestamptz); +\set ON_ERROR_STOP 1 -- clean up databases created \c :TEST_DBNAME :ROLE_SUPERUSER DROP DATABASE postgres_fdw_db;