Commit 932c4cd

Merge remote-tracking branch 'origin/3269-docs-rfc-update-the-readme-in-the-timescaledb-github-repo-to-match-the-pgai-docs' into 3269-docs-rfc-update-the-readme-in-the-timescaledb-github-repo-to-match-the-pgai-docs
atovpeko committed Nov 28, 2024
2 parents 35b26ca + 50e64b2 commit 932c4cd
Showing 64 changed files with 2,509 additions and 433 deletions.
8 changes: 4 additions & 4 deletions .github/ci_settings.py
@@ -12,19 +12,19 @@
#

PG14_EARLIEST = "14.0"
PG14_LATEST = "14.13"
PG14_LATEST = "14.15"
PG14_ABI_MIN = "14.0"

PG15_EARLIEST = "15.0"
PG15_LATEST = "15.8"
PG15_LATEST = "15.10"
PG15_ABI_MIN = "15.0"

PG16_EARLIEST = "16.0"
PG16_LATEST = "16.4"
PG16_LATEST = "16.6"
PG16_ABI_MIN = "16.0"

PG17_EARLIEST = "17.0"
PG17_LATEST = "17.0"
PG17_LATEST = "17.2"
PG17_ABI_MIN = "17.0"

PG_LATEST = [PG14_LATEST, PG15_LATEST, PG16_LATEST, PG17_LATEST]
5 changes: 4 additions & 1 deletion .github/workflows/pr-approvals.yaml
@@ -35,7 +35,10 @@ jobs:
# Get the list of modified files in this pull request
echo "Modified files: "
gh pr view $PR_NUMBER --json files
files=$(gh pr view $PR_NUMBER --json files --jq '.files.[].path | select(startswith(".github") | not)')
# Get modified files, but exclude those that are workflow
# files or are related to Hypercore table access
# method. These require only a single reviewer.
files=$(gh pr view $PR_NUMBER --json files --jq '.files.[].path | select(startswith(".github") or test("hypercore|columnar_scan") | not)')
# Get the number of approvals in this pull request
echo "Reviews: "
33 changes: 25 additions & 8 deletions .github/workflows/windows-build-and-test.yaml
@@ -25,6 +25,10 @@ jobs:
runs-on: ubuntu-latest
outputs:
build_type: ${{ steps.build_type.outputs.build_type }}
pg14_latest: ${{ steps.config.outputs.pg14_latest }}
pg15_latest: ${{ steps.config.outputs.pg15_latest }}
pg16_latest: ${{ steps.config.outputs.pg16_latest }}
pg17_latest: ${{ steps.config.outputs.pg17_latest }}

steps:
- name: Checkout source code
@@ -56,6 +60,15 @@
tsl_ignores: ["compression_algos"]
tsl_skips: ["bgw_db_scheduler bgw_db_scheduler_fixed"]
pg_config: ["-cfsync=off -cstatement_timeout=60s"]
include:
- pg: 14
pg_version: ${{ needs.config.outputs.pg14_latest }}
- pg: 15
pg_version: ${{ needs.config.outputs.pg15_latest }}
- pg: 16
pg_version: ${{ needs.config.outputs.pg16_latest }}
- pg: 17
pg_version: ${{ needs.config.outputs.pg17_latest }}
env:
# PostgreSQL configuration
PGPORT: 55432
@@ -115,18 +128,22 @@

# Force install PostgreSQL 17 since the package still on moderation
# https://community.chocolatey.org/packages/postgresql17
- name: Install PostgreSQL ${{ matrix.pg }} (using ${{ matrix.pg_version }})
if: github.event_name != 'schedule' && steps.cache-postgresql.outputs.cache-hit != 'true'
run: |
choco feature disable --name=usePackageExitCodes
choco feature disable --name=showDownloadProgress
choco install postgresql${{ matrix.pg }} --version ${{ matrix.pg_version }} `
--force -y --install-args="'--prefix $HOME\PostgreSQL\${{ matrix.pg }} --extract-only yes'"
# This is for nightly builds. Here we pick the latest version of the package.
- name: Install PostgreSQL ${{ matrix.pg }}
if: steps.cache-postgresql.outputs.cache-hit != 'true'
if: github.event_name == 'schedule' && steps.cache-postgresql.outputs.cache-hit != 'true'
run: |
choco feature disable --name=usePackageExitCodes
choco feature disable --name=showDownloadProgress
if(${{ matrix.pg }} -eq 17) {
choco install postgresql${{ matrix.pg }} --version 17.0.0 `
--force -y --install-args="'--prefix $HOME\PostgreSQL\${{ matrix.pg }} --extract-only yes'"
} else {
choco install postgresql${{ matrix.pg }} `
--force -y --install-args="'--prefix $HOME\PostgreSQL\${{ matrix.pg }} --extract-only yes'"
}
choco install postgresql${{ matrix.pg }} `
--force -y --install-args="'--prefix $HOME\PostgreSQL\${{ matrix.pg }} --extract-only yes'"
- name: Configure TimescaleDB
run: cmake -B build_win -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} `
2 changes: 1 addition & 1 deletion .unreleased/pr_7104
@@ -1 +1 @@
Implements: #7271 Hypercore table access method
Implements: #7104 Hypercore table access method
1 change: 1 addition & 0 deletions .unreleased/pr_7455
@@ -0,0 +1 @@
Implements: #7455 Support DROP NOT NULL on compressed hypertables
1 change: 1 addition & 0 deletions .unreleased/pr_7486
@@ -0,0 +1 @@
Implements: #7486 Prevent building against postgres versions with broken ABI
1 change: 1 addition & 0 deletions .unreleased/pr_7488
@@ -0,0 +1 @@
Fixes: #7488 Emit error for transition table trigger on chunks
2 changes: 2 additions & 0 deletions .unreleased/resolve-vars
@@ -0,0 +1,2 @@
Fixes: #7410 "aggregated compressed column not found" error on aggregation query.
Thanks: @uasiddiqi for reporting the "aggregated compressed column not found" error.
1 change: 1 addition & 0 deletions README.md
@@ -197,3 +197,4 @@ Timescale is PostgreSQL made powerful. To learn more about the company and its p
[postgres-breaking-change]: https://www.postgresql.org/about/news/postgresql-172-166-1510-1415-1318-and-1222-released-2965/
17 changes: 13 additions & 4 deletions src/compat/compat.h
@@ -26,10 +26,19 @@

#define PG_MAJOR_MIN 14

#define is_supported_pg_version_14(version) ((version >= 140000) && (version < 150000))
#define is_supported_pg_version_15(version) ((version >= 150000) && (version < 160000))
#define is_supported_pg_version_16(version) ((version >= 160000) && (version < 170000))
#define is_supported_pg_version_17(version) ((version >= 170000) && (version < 180000))
/*
* Prevent building against upstream versions that had ABI breaking change (14.14, 15.9, 16.5, 17.1)
* that was reverted in the following release.
*/

#define is_supported_pg_version_14(version) \
((version >= 140000) && (version < 150000) && (version != 140014))
#define is_supported_pg_version_15(version) \
((version >= 150000) && (version < 160000) && (version != 150009))
#define is_supported_pg_version_16(version) \
((version >= 160000) && (version < 170000) && (version != 160005))
#define is_supported_pg_version_17(version) \
((version >= 170000) && (version < 180000) && (version != 170001))
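
For illustration, these guards can be exercised outside the extension. The harness below is a hypothetical sketch (the main() and the duplicated macros are not part of this commit) showing that the reverted ABI-breaking releases are rejected while the follow-up releases pass:

#include <assert.h>

/* Hypothetical standalone harness; macros copied from compat.h above. */
#define is_supported_pg_version_14(version) \
	((version >= 140000) && (version < 150000) && (version != 140014))
#define is_supported_pg_version_17(version) \
	((version >= 170000) && (version < 180000) && (version != 170001))

int
main(void)
{
	assert(!is_supported_pg_version_14(140014)); /* 14.14: ABI break, rejected */
	assert(is_supported_pg_version_14(140015));  /* 14.15: revert release, accepted */
	assert(!is_supported_pg_version_17(170001)); /* 17.1: ABI break, rejected */
	assert(is_supported_pg_version_17(170002));  /* 17.2: revert release, accepted */
	return 0;
}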

/*
* PG16 support is a WIP and not complete yet.
23 changes: 23 additions & 0 deletions src/guc.c
@@ -108,6 +108,12 @@ static const struct config_enum_entry transparent_decompression_options[] = {
{ NULL, 0, false }
};

static const struct config_enum_entry hypercore_copy_to_options[] = {
{ "all_data", HYPERCORE_COPY_ALL_DATA, false },
{ "no_compressed_data", HYPERCORE_COPY_NO_COMPRESSED_DATA, false },
{ NULL, 0, false }
};

bool ts_guc_enable_deprecation_warnings = true;
bool ts_guc_enable_optimizations = true;
bool ts_guc_restoring = false;
@@ -156,6 +162,8 @@ bool ts_guc_enable_tss_callbacks = true;
TSDLLEXPORT bool ts_guc_enable_delete_after_compression = false;
TSDLLEXPORT bool ts_guc_enable_merge_on_cagg_refresh = false;
TSDLLEXPORT char *ts_guc_hypercore_indexam_whitelist;
TSDLLEXPORT HypercoreCopyToBehavior ts_guc_hypercore_copy_to_behavior =
HYPERCORE_COPY_NO_COMPRESSED_DATA;

/* default value of ts_guc_max_open_chunks_per_insert and
* ts_guc_max_cached_chunks_per_hypertable will be set as their respective boot-value when the
@@ -172,6 +180,7 @@ char *ts_last_tune_time = NULL;
char *ts_last_tune_version = NULL;

bool ts_guc_debug_require_batch_sorted_merge = false;

bool ts_guc_debug_allow_cagg_with_deprecated_funcs = false;

#ifdef TS_DEBUG
@@ -1032,6 +1041,20 @@ _guc_init(void)
/* assign_hook= */ NULL,
/* show_hook= */ NULL);

DefineCustomEnumVariable(MAKE_EXTOPTION("hypercore_copy_to_behavior"),
"The behavior of COPY TO on a hypercore table",
"Set to 'all_data' to return both compressed and uncompressed data "
"via the Hypercore table when using COPY TO. Set to "
"'no_compressed_data' to skip compressed data.",
/* valueAddr= */ (int *) &ts_guc_hypercore_copy_to_behavior,
/* bootValue= */ HYPERCORE_COPY_NO_COMPRESSED_DATA,
/* options= */ hypercore_copy_to_options,
/* context= */ PGC_USERSET,
0,
NULL,
NULL,
NULL);

#ifdef TS_DEBUG
DefineCustomBoolVariable(/* name= */ MAKE_EXTOPTION("shutdown_bgw_scheduler"),
/* short_desc= */ "immediately shutdown the bgw scheduler",
19 changes: 19 additions & 0 deletions src/guc.h
@@ -124,6 +124,25 @@ extern TSDLLEXPORT bool ts_guc_debug_require_batch_sorted_merge;
extern TSDLLEXPORT bool ts_guc_debug_allow_cagg_with_deprecated_funcs;
extern TSDLLEXPORT char *ts_guc_hypercore_indexam_whitelist;

/*
* Defines the behavior of COPY TO when used on a Hypercore table.
*
* If set to COPY_ALL_DATA, all data is copied from a Hypercore table,
* including compressed data (but in uncompressed form) from the internal
* compressed relation. When doing a COPY TO on the internal compressed
* relation, no data is returned.
*
* If set to COPY_NO_COMPRESSED_DATA, then only uncompressed data is copied
* (if any). This behavior is compatible with compression without hypercore.
*/
typedef enum HypercoreCopyToBehavior
{
HYPERCORE_COPY_NO_COMPRESSED_DATA,
HYPERCORE_COPY_ALL_DATA,
} HypercoreCopyToBehavior;

extern TSDLLEXPORT HypercoreCopyToBehavior ts_guc_hypercore_copy_to_behavior;
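
As a usage sketch, a COPY TO code path would branch on this GUC roughly as below. The helper name is hypothetical (not part of this commit), and the SQL in the comment assumes the option is exposed under the extension's timescaledb. prefix:

/* Hypothetical helper: should COPY TO on a Hypercore table also return
 * rows stored in the internal compressed relation? */
static inline bool
hypercore_copy_includes_compressed(void)
{
	/* Toggled from SQL, presumably via:
	 *   SET timescaledb.hypercore_copy_to_behavior = 'all_data';
	 */
	return ts_guc_hypercore_copy_to_behavior == HYPERCORE_COPY_ALL_DATA;
}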

void _guc_init(void);

typedef enum
64 changes: 56 additions & 8 deletions src/process_utility.c
@@ -280,6 +280,8 @@ check_alter_table_allowed_on_ht_with_compression(Hypertable *ht, AlterTableStmt
case AT_ReplicaIdentity:
case AT_ReAddStatistics:
case AT_SetCompression:
case AT_DropNotNull:
case AT_SetNotNull:
#if PG15_GE
case AT_SetAccessMethod:
#endif
@@ -2562,11 +2564,50 @@ process_altertable_validate_constraint_end(Hypertable *ht, AlterTableCmd *cmd)
foreach_chunk(ht, validate_hypertable_constraint, cmd);
}

/*
* Validate that SET NOT NULL is ok for this chunk.
*
* Throws an error if SET NOT NULL on this chunk is not allowed, right now,
* SET NOT NULL is allowed on chunks that are either a fully decompressed, or
* are using the Hypercore table access method.
*/
static void
process_altertable_drop_not_null(Hypertable *ht, AlterTableCmd *cmd)
validate_set_not_null(Hypertable *ht, Oid chunk_relid, void *arg)
{
Chunk *chunk = ts_chunk_get_by_relid(chunk_relid, true);
if (ts_chunk_is_compressed(chunk) && !ts_is_hypercore_am(chunk->amoid))
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("operation not supported on compressed chunks not using the "
"\"hypercore\" table access method"),
errdetail("Chunk %s.%s is using the heap table access method and has compressed "
"data.",
NameStr(chunk->fd.schema_name),
NameStr(chunk->fd.table_name)),
errhint("Either decompress all chunks of the hypertable or use \"ALTER TABLE "
"%s.%s SET ACCESS METHOD hypercore\" on all chunks to change access "
"method.",
NameStr(chunk->fd.schema_name),
NameStr(chunk->fd.table_name))));
}
}

/*
* This function checks that we are not dropping NOT NULL from bad columns and
* that all chunks support the modification.
*/
static void
process_altertable_alter_not_null_start(Hypertable *ht, AlterTableCmd *cmd)
{
int i;

if (cmd->subtype == AT_SetNotNull)
foreach_chunk(ht, validate_set_not_null, cmd);

if (cmd->subtype != AT_DropNotNull)
return;

for (i = 0; i < ht->space->num_dimensions; i++)
{
Dimension *dim = &ht->space->dimensions[i];
@@ -3802,9 +3843,10 @@ process_altertable_start_table(ProcessUtilityArgs *args)
verify_constraint_hypertable(ht, cmd->def);
}
break;
case AT_SetNotNull:
case AT_DropNotNull:
if (ht != NULL)
process_altertable_drop_not_null(ht, cmd);
process_altertable_alter_not_null_start(ht, cmd);
break;
case AT_AddColumn:
#if PG16_LT
@@ -4186,6 +4228,8 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
case AT_DropCluster:
foreach_chunk(ht, process_altertable_chunk, cmd);
break;
case AT_SetNotNull:
case AT_DropNotNull:
case AT_SetRelOptions:
case AT_ResetRelOptions:
case AT_ReplaceRelOptions:
@@ -4212,8 +4256,6 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
case AT_SetStorage:
case AT_ColumnDefault:
case AT_CookedColumnDefault:
case AT_SetNotNull:
case AT_DropNotNull:
case AT_AddOf:
case AT_DropOf:
case AT_AddIdentity:
@@ -4410,6 +4452,12 @@ process_create_trigger_start(ProcessUtilityArgs *args)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("triggers are not supported on continuous aggregate")));

if (stmt->transitionRels)
if (ts_chunk_get_by_relid(relid, false) != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg(
"trigger with transition tables not supported on hypertable chunks")));
return DDL_CONTINUE;
}

@@ -4493,8 +4541,8 @@ process_reassign_owned_start(ProcessUtilityArgs *args)
Oid newrole_oid = get_rolespec_oid(stmt->newrole, false);
HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free);

/* We do not need to check privileges here since ReassignOwnedObjects() will check the
* privileges and error out if they are not correct. */
/* We do not need to check privileges here since ReassignOwnedObjects() will check
* the privileges and error out if they are not correct. */
ts_bgw_job_update_owner(ti->scanrel, tuple, ts_scanner_get_tupledesc(ti), newrole_oid);

if (should_free)
@@ -4630,8 +4678,8 @@ process_create_stmt(ProcessUtilityArgs *args)
errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("hypercore access method not supported on \"%s\"", stmt->relation->relname),
errdetail("The hypercore access method is only supported for hypertables."),
errhint("It does not make sense to set the default access method for all tables "
"to \"%s\" since it is only supported for hypertables.",
errhint("It does not make sense to set the default access method for all "
"tables to \"%s\" since it is only supported for hypertables.",
TS_HYPERCORE_TAM_NAME));

return DDL_CONTINUE;
8 changes: 6 additions & 2 deletions src/utils.c
@@ -1865,9 +1865,12 @@ relation_set_reloption_impl(Relation rel, List *options, LOCKMODE lockmode)

Relation pgclass = table_open(RelationRelationId, RowExclusiveLock);
Oid relid = RelationGetRelid(rel);
HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
HeapTuple tuple = SearchSysCacheLockedCopy1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", relid);
#ifdef SYSCACHE_TUPLE_LOCK_NEEDED
ItemPointerData otid = tuple->t_self;
#endif

/* Get the old reloptions */
Datum datum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull);
@@ -1892,8 +1895,9 @@ relation_set_reloption_impl(Relation rel, List *options, LOCKMODE lockmode)
/* Not sure if we need this one, but keeping it as a precaution */
InvokeObjectPostAlterHook(RelationRelationId, RelationGetRelid(rel), 0);

UnlockSysCacheTuple(pgclass, &otid);
heap_freetuple(newtuple);
ReleaseSysCache(tuple);
heap_freetuple(tuple);
table_close(pgclass, RowExclusiveLock);
}
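
The unconditional UnlockSysCacheTuple() next to the #ifdef'd otid suggests the compat layer defines no-op fallbacks on builds where the syscache tuple lock is not needed. A hypothetical sketch of such fallbacks (assumed, not the committed definitions):

/* Assumed compat fallbacks when the syscache tuple lock is unnecessary:
 * the locked lookup degrades to a plain copy, and unlock becomes a no-op
 * whose arguments are discarded, so the #ifdef'd otid still compiles. */
#ifndef SYSCACHE_TUPLE_LOCK_NEEDED
#define SearchSysCacheLockedCopy1(cacheid, key) SearchSysCacheCopy1(cacheid, key)
#define UnlockSysCacheTuple(rel, tid) ((void) 0)
#endif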

9 changes: 9 additions & 0 deletions test/expected/triggers.out
@@ -428,6 +428,9 @@ SELECT create_hypertable('transition_test','time');
(4,public,transition_test,t)
(1 row)

-- Insert some rows to create a chunk
INSERT INTO transition_test values ('2020-01-10');
SELECT chunk FROM show_chunks('transition_test') tbl(chunk) limit 1 \gset
-- test creating trigger with transition tables on existing hypertable
\set ON_ERROR_STOP 0
CREATE TRIGGER t2 AFTER INSERT ON transition_test REFERENCING NEW TABLE AS new_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
@@ -436,6 +439,12 @@ CREATE TRIGGER t3 AFTER UPDATE ON transition_test REFERENCING NEW TABLE AS new_t
ERROR: trigger with transition tables not supported on hypertables
CREATE TRIGGER t4 AFTER DELETE ON transition_test REFERENCING OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertables
CREATE TRIGGER t2 AFTER INSERT ON :chunk REFERENCING NEW TABLE AS new_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertable chunks
CREATE TRIGGER t3 AFTER UPDATE ON :chunk REFERENCING NEW TABLE AS new_trans OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertable chunks
CREATE TRIGGER t4 AFTER DELETE ON :chunk REFERENCING OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertable chunks
CREATE TRIGGER t2 AFTER INSERT ON transition_test REFERENCING NEW TABLE AS new_trans FOR EACH ROW EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertables
CREATE TRIGGER t3 AFTER UPDATE ON transition_test REFERENCING NEW TABLE AS new_trans OLD TABLE AS old_trans FOR EACH ROW EXECUTE FUNCTION test_trigger();