Commit
Merge remote-tracking branch 'akuzm/vector-filter' into HEAD
akuzm committed Dec 2, 2024
2 parents 5243d25 + b717f74 commit 56df16a
Showing 33 changed files with 1,319 additions and 62 deletions.
37 changes: 6 additions & 31 deletions .github/workflows/abi.yaml
@@ -44,43 +44,18 @@ jobs:
strategy:
fail-fast: false
matrix:
dir: [ "forward", "backward" ]
pg: [ 14, 15, 16, 17 ]
include:
- dir: backward
pg: 14
builder: ${{ fromJson(needs.config.outputs.pg14_latest) }}-alpine3.19
tester: ${{ fromJson(needs.config.outputs.pg14_abi_min) }}-alpine
ignores: memoize
- dir: forward
pg: 14
- pg: 14
builder: ${{ fromJson(needs.config.outputs.pg14_abi_min) }}-alpine
tester: ${{ fromJson(needs.config.outputs.pg14_latest) }}-alpine3.19
- dir: backward
pg: 15
builder: ${{ fromJson(needs.config.outputs.pg15_latest) }}-alpine3.19
tester: ${{ fromJson(needs.config.outputs.pg15_abi_min) }}-alpine
- dir: forward
pg: 15
- pg: 15
builder: ${{ fromJson(needs.config.outputs.pg15_abi_min) }}-alpine
tester: ${{ fromJson(needs.config.outputs.pg15_latest) }}-alpine3.19
- dir: backward
pg: 16
builder: ${{ fromJson(needs.config.outputs.pg16_latest) }}-alpine3.19
tester: ${{ fromJson(needs.config.outputs.pg16_abi_min) }}-alpine
# this test has issues with 16.0 version of pg_dump binary
# which affects backwards test only
ignores: pg_dump_unprivileged
- dir: forward
pg: 16
- pg: 16
builder: ${{ fromJson(needs.config.outputs.pg16_abi_min) }}-alpine
tester: ${{ fromJson(needs.config.outputs.pg16_latest) }}-alpine3.19
- dir: backward
pg: 17
builder: ${{ fromJson(needs.config.outputs.pg17_latest) }}-alpine3.19
tester: ${{ fromJson(needs.config.outputs.pg17_abi_min) }}-alpine
- dir: forward
pg: 17
- pg: 17
builder: ${{ fromJson(needs.config.outputs.pg17_abi_min) }}-alpine
tester: ${{ fromJson(needs.config.outputs.pg17_latest) }}-alpine3.19

@@ -89,7 +64,7 @@ jobs:
- name: Checkout TimescaleDB
uses: actions/checkout@v4

- name: Build extension
- name: Build extension with ${{ matrix.builder }}
run: |
BUILDER_IMAGE="postgres:${{matrix.builder}}"
@@ -113,7 +88,7 @@ jobs:
cp `pg_config --pkglibdir`/timescaledb*.so build_abi/install_lib
EOF
- name: Run tests
- name: Run tests on server ${{ matrix.tester }}
run: |
TEST_IMAGE="postgres:${{ matrix.tester }}"
5 changes: 4 additions & 1 deletion .github/workflows/pr-approvals.yaml
@@ -35,7 +35,10 @@ jobs:
# Get the list of modified files in this pull request
echo "Modified files: "
gh pr view $PR_NUMBER --json files
files=$(gh pr view $PR_NUMBER --json files --jq '.files.[].path | select(startswith(".github") | not)')
# Get modified files, but exclude those that are workflow
# files or are related to Hypercore table access
# method. These require only a single reviewer.
files=$(gh pr view $PR_NUMBER --json files --jq '.files.[].path | select(startswith(".github") or test("hypercore|columnar_scan") | not)')
# Get the number of approvals in this pull request
echo "Reviews: "
12 changes: 8 additions & 4 deletions .github/workflows/windows-build-and-test.yaml
@@ -25,6 +25,10 @@ jobs:
runs-on: ubuntu-latest
outputs:
build_type: ${{ steps.build_type.outputs.build_type }}
pg14_latest: ${{ steps.config.outputs.pg14_latest }}
pg15_latest: ${{ steps.config.outputs.pg15_latest }}
pg16_latest: ${{ steps.config.outputs.pg16_latest }}
pg17_latest: ${{ steps.config.outputs.pg17_latest }}

steps:
- name: Checkout source code
@@ -58,13 +62,13 @@
pg_config: ["-cfsync=off -cstatement_timeout=60s"]
include:
- pg: 14
pg_version: 14.13
pg_version: ${{ needs.config.outputs.pg14_latest }}
- pg: 15
pg_version: 15.8
pg_version: ${{ needs.config.outputs.pg15_latest }}
- pg: 16
pg_version: 16.4
pg_version: ${{ needs.config.outputs.pg16_latest }}
- pg: 17
pg_version: 17.0
pg_version: ${{ needs.config.outputs.pg17_latest }}
env:
# PostgreSQL configuration
PGPORT: 55432
1 change: 1 addition & 0 deletions .unreleased/pr_7486
@@ -0,0 +1 @@
Implements: #7486 Prevent building against postgres versions with broken ABI
1 change: 1 addition & 0 deletions .unreleased/pr_7488
@@ -0,0 +1 @@
Fixes: #7488 Emit error for transition table trigger on chunks
1 change: 1 addition & 0 deletions .unreleased/vectorized-agg-filter
@@ -0,0 +1 @@
Implements: #7458 Support vectorized aggregation with aggregate FILTER clauses that are also vectorizable
3 changes: 1 addition & 2 deletions sql/updates/post-update.sql
@@ -144,10 +144,9 @@ BEGIN
format('%I.%I', user_view_schema, user_view_name)
FROM _timescaledb_catalog.continuous_agg
WHERE finalized IS FALSE
AND current_setting('server_version_num')::int >= 150000
ORDER BY 1
LOOP
RAISE WARNING 'Continuous Aggregate: % with old format will not be supported on PostgreSQL version greater or equal to 15. You should upgrade to the new format', cagg_name;
RAISE WARNING 'Continuous Aggregate "%" with old format will not be supported in the next version. You should use the `cagg_migrate` procedure to migrate to the new format.', cagg_name;
END LOOP;
END $$;

17 changes: 13 additions & 4 deletions src/compat/compat.h
@@ -26,10 +26,19 @@

#define PG_MAJOR_MIN 14

#define is_supported_pg_version_14(version) ((version >= 140000) && (version < 150000))
#define is_supported_pg_version_15(version) ((version >= 150000) && (version < 160000))
#define is_supported_pg_version_16(version) ((version >= 160000) && (version < 170000))
#define is_supported_pg_version_17(version) ((version >= 170000) && (version < 180000))
/*
* Prevent building against upstream versions that had an ABI-breaking change
* (14.14, 15.9, 16.5, 17.1) that was reverted in the following release.
*/

#define is_supported_pg_version_14(version) \
((version >= 140000) && (version < 150000) && (version != 140014))
#define is_supported_pg_version_15(version) \
((version >= 150000) && (version < 160000) && (version != 150009))
#define is_supported_pg_version_16(version) \
((version >= 160000) && (version < 170000) && (version != 160005))
#define is_supported_pg_version_17(version) \
((version >= 170000) && (version < 180000) && (version != 170001))

/*
* PG16 support is a WIP and not complete yet.
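The version guards above intentionally reject the point releases (14.14, 15.9, 16.5, 17.1) that shipped the later-reverted ABI break. Below is a minimal sketch of how such guards are typically combined into a single build-time check; the `is_supported_pg_version` wrapper and the error text are illustrative assumptions, not necessarily the exact contents of compat.h:

```c
/* Hypothetical wrapper over the per-major-version checks shown above. */
#define is_supported_pg_version(version)                                                           \
	(is_supported_pg_version_14(version) || is_supported_pg_version_15(version) ||                 \
	 is_supported_pg_version_16(version) || is_supported_pg_version_17(version))

/*
 * PG_VERSION_NUM comes from the PostgreSQL headers (e.g. 170001 for 17.1),
 * so a build against one of the broken point releases fails immediately.
 */
#if !is_supported_pg_version(PG_VERSION_NUM)
#error "TimescaleDB does not support this PostgreSQL version"
#endif
```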
23 changes: 23 additions & 0 deletions src/guc.c
@@ -108,6 +108,12 @@ static const struct config_enum_entry transparent_decompression_options[] = {
{ NULL, 0, false }
};

static const struct config_enum_entry hypercore_copy_to_options[] = {
{ "all_data", HYPERCORE_COPY_ALL_DATA, false },
{ "no_compressed_data", HYPERCORE_COPY_NO_COMPRESSED_DATA, false },
{ NULL, 0, false }
};

bool ts_guc_enable_deprecation_warnings = true;
bool ts_guc_enable_optimizations = true;
bool ts_guc_restoring = false;
@@ -156,6 +162,8 @@ bool ts_guc_enable_tss_callbacks = true;
TSDLLEXPORT bool ts_guc_enable_delete_after_compression = false;
TSDLLEXPORT bool ts_guc_enable_merge_on_cagg_refresh = false;
TSDLLEXPORT char *ts_guc_hypercore_indexam_whitelist;
TSDLLEXPORT HypercoreCopyToBehavior ts_guc_hypercore_copy_to_behavior =
HYPERCORE_COPY_NO_COMPRESSED_DATA;

/* default value of ts_guc_max_open_chunks_per_insert and
* ts_guc_max_cached_chunks_per_hypertable will be set as their respective boot-value when the
@@ -172,6 +180,7 @@ char *ts_last_tune_time = NULL;
char *ts_last_tune_version = NULL;

bool ts_guc_debug_require_batch_sorted_merge = false;

bool ts_guc_debug_allow_cagg_with_deprecated_funcs = false;

#ifdef TS_DEBUG
@@ -1032,6 +1041,20 @@ _guc_init(void)
/* assign_hook= */ NULL,
/* show_hook= */ NULL);

DefineCustomEnumVariable(MAKE_EXTOPTION("hypercore_copy_to_behavior"),
"The behavior of COPY TO on a hypercore table",
"Set to 'all_data' to return both compressed and uncompressed data "
"via the Hypercore table when using COPY TO. Set to "
"'no_compressed_data' to skip compressed data.",
/* valueAddr= */ (int *) &ts_guc_hypercore_copy_to_behavior,
/* bootValue= */ HYPERCORE_COPY_NO_COMPRESSED_DATA,
/* options= */ hypercore_copy_to_options,
/* context= */ PGC_USERSET,
0,
NULL,
NULL,
NULL);

#ifdef TS_DEBUG
DefineCustomBoolVariable(/* name= */ MAKE_EXTOPTION("shutdown_bgw_scheduler"),
/* short_desc= */ "immediately shutdown the bgw scheduler",
19 changes: 19 additions & 0 deletions src/guc.h
@@ -124,6 +124,25 @@ extern TSDLLEXPORT bool ts_guc_debug_require_batch_sorted_merge;
extern TSDLLEXPORT bool ts_guc_debug_allow_cagg_with_deprecated_funcs;
extern TSDLLEXPORT char *ts_guc_hypercore_indexam_whitelist;

/*
* Defines the behavior of COPY TO when used on a Hypercore table.
*
* If set to COPY_ALL_DATA, all data is copied from a Hypercore table,
* including compressed data (but in uncompressed form) from the internal
* compressed relation. When doing a COPY TO on the internal compressed
* relation, no data is returned.
*
* If set to COPY_NO_COMPRESSED_DATA, then only uncompressed data is copied
* (if any). This behavior is compatible with compression without hypercore.
*/
typedef enum HypercoreCopyToBehavior
{
HYPERCORE_COPY_NO_COMPRESSED_DATA,
HYPERCORE_COPY_ALL_DATA,
} HypercoreCopyToBehavior;

extern TSDLLEXPORT HypercoreCopyToBehavior ts_guc_hypercore_copy_to_behavior;

void _guc_init(void);

typedef enum
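To make the new setting concrete, here is a hedged sketch of how a COPY TO path might consult the GUC before scanning a Hypercore relation. `setup_copy_scan` is a made-up helper name; `ts_guc_hypercore_copy_to_behavior`, `HYPERCORE_COPY_NO_COMPRESSED_DATA`, and `hypercore_scan_set_skip_compressed` do appear elsewhere in this commit, but the wiring below is illustrative rather than the actual implementation:

```c
#include <postgres.h>
#include <access/tableam.h>

#include "guc.h" /* ts_guc_hypercore_copy_to_behavior, HypercoreCopyToBehavior */
/* hypercore_scan_set_skip_compressed() is assumed to be declared in a Hypercore header. */

/*
 * Sketch only: begin a table scan for COPY TO and, depending on the GUC,
 * ask the Hypercore table access method to skip the compressed data.
 */
static TableScanDesc
setup_copy_scan(Relation rel, Snapshot snapshot)
{
	TableScanDesc scan = table_beginscan(rel, snapshot, 0, NULL);

	if (ts_guc_hypercore_copy_to_behavior == HYPERCORE_COPY_NO_COMPRESSED_DATA)
		hypercore_scan_set_skip_compressed(scan, true);

	return scan;
}
```

From SQL, the setting would be toggled per session, e.g. `SET timescaledb.hypercore_copy_to_behavior = 'all_data';` (the `timescaledb.` prefix is an assumption based on `MAKE_EXTOPTION`).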
6 changes: 6 additions & 0 deletions src/process_utility.c
@@ -4452,6 +4452,12 @@ process_create_trigger_start(ProcessUtilityArgs *args)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("triggers are not supported on continuous aggregate")));

if (stmt->transitionRels)
if (ts_chunk_get_by_relid(relid, false) != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg(
"trigger with transition tables not supported on hypertable chunks")));
return DDL_CONTINUE;
}

9 changes: 9 additions & 0 deletions test/expected/triggers.out
@@ -428,6 +428,9 @@ SELECT create_hypertable('transition_test','time');
(4,public,transition_test,t)
(1 row)

-- Insert some rows to create a chunk
INSERT INTO transition_test values ('2020-01-10');
SELECT chunk FROM show_chunks('transition_test') tbl(chunk) limit 1 \gset
-- test creating trigger with transition tables on existing hypertable
\set ON_ERROR_STOP 0
CREATE TRIGGER t2 AFTER INSERT ON transition_test REFERENCING NEW TABLE AS new_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
@@ -436,6 +439,12 @@ CREATE TRIGGER t3 AFTER UPDATE ON transition_test REFERENCING NEW TABLE AS new_t
ERROR: trigger with transition tables not supported on hypertables
CREATE TRIGGER t4 AFTER DELETE ON transition_test REFERENCING OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertables
CREATE TRIGGER t2 AFTER INSERT ON :chunk REFERENCING NEW TABLE AS new_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertable chunks
CREATE TRIGGER t3 AFTER UPDATE ON :chunk REFERENCING NEW TABLE AS new_trans OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertable chunks
CREATE TRIGGER t4 AFTER DELETE ON :chunk REFERENCING OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertable chunks
CREATE TRIGGER t2 AFTER INSERT ON transition_test REFERENCING NEW TABLE AS new_trans FOR EACH ROW EXECUTE FUNCTION test_trigger();
ERROR: trigger with transition tables not supported on hypertables
CREATE TRIGGER t3 AFTER UPDATE ON transition_test REFERENCING NEW TABLE AS new_trans OLD TABLE AS old_trans FOR EACH ROW EXECUTE FUNCTION test_trigger();
7 changes: 7 additions & 0 deletions test/sql/triggers.sql
@@ -313,11 +313,18 @@ SELECT create_hypertable('transition_test','time');
DROP TRIGGER t1 ON transition_test;
SELECT create_hypertable('transition_test','time');

-- Insert some rows to create a chunk
INSERT INTO transition_test values ('2020-01-10');
SELECT chunk FROM show_chunks('transition_test') tbl(chunk) limit 1 \gset

-- test creating trigger with transition tables on existing hypertable
\set ON_ERROR_STOP 0
CREATE TRIGGER t2 AFTER INSERT ON transition_test REFERENCING NEW TABLE AS new_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
CREATE TRIGGER t3 AFTER UPDATE ON transition_test REFERENCING NEW TABLE AS new_trans OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
CREATE TRIGGER t4 AFTER DELETE ON transition_test REFERENCING OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
CREATE TRIGGER t2 AFTER INSERT ON :chunk REFERENCING NEW TABLE AS new_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
CREATE TRIGGER t3 AFTER UPDATE ON :chunk REFERENCING NEW TABLE AS new_trans OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();
CREATE TRIGGER t4 AFTER DELETE ON :chunk REFERENCING OLD TABLE AS old_trans FOR EACH STATEMENT EXECUTE FUNCTION test_trigger();

CREATE TRIGGER t2 AFTER INSERT ON transition_test REFERENCING NEW TABLE AS new_trans FOR EACH ROW EXECUTE FUNCTION test_trigger();
CREATE TRIGGER t3 AFTER UPDATE ON transition_test REFERENCING NEW TABLE AS new_trans OLD TABLE AS old_trans FOR EACH ROW EXECUTE FUNCTION test_trigger();
3 changes: 2 additions & 1 deletion test/sql/utils/pg_dump_aux_dump.sh
@@ -1,6 +1,7 @@
DUMPFILE=${DUMPFILE:-$1}
EXTRA_PGOPTIONS=${EXTRA_PGOPTIONS:-$2}
# Override PGOPTIONS to remove verbose output
PGOPTIONS='--client-min-messages=warning'
PGOPTIONS="--client-min-messages=warning $EXTRA_PGOPTIONS"

export PGOPTIONS

4 changes: 2 additions & 2 deletions tsl/src/compression/api.c
@@ -1141,6 +1141,7 @@ fetch_unmatched_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tuples
TableScanDesc scan;
TupleTableSlot *slot = table_slot_create(uncompressed_chunk_rel, NULL);
Snapshot snapshot = GetLatestSnapshot();

scan = table_beginscan(uncompressed_chunk_rel, snapshot, 0, NULL);
hypercore_scan_set_skip_compressed(scan, true);

@@ -1209,11 +1210,10 @@ fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tupleso
}

snapshot = GetLatestSnapshot();
/* Let compression TAM know it should only return tuples from the
* non-compressed relation. */

scan = table_beginscan(uncompressed_chunk_rel, snapshot, nsegbycols_nonnull, scankey);
hypercore_scan_set_skip_compressed(scan, true);

TupleTableSlot *slot = table_slot_create(uncompressed_chunk_rel, NULL);

while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
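The comment removed above explained the purpose of `hypercore_scan_set_skip_compressed()`: the compression TAM should only return tuples from the non-compressed relation. A condensed, hedged sketch of the surrounding pattern (feeding a chunk's uncompressed rows into a tuplesort for recompression) is shown below; the function name and the reduced argument list are illustrative, not the exact body of `fetch_unmatched_uncompressed_chunk_into_tuplesort`:

```c
#include <postgres.h>
#include <access/tableam.h>
#include <executor/tuptable.h>
#include <utils/tuplesort.h>
/* hypercore_scan_set_skip_compressed() is assumed to be declared in a Hypercore header. */

/*
 * Sketch: scan only the non-compressed part of the chunk and push every
 * tuple into the tuplesort that later drives recompression.
 */
static void
fetch_uncompressed_into_tuplesort(Relation uncompressed_chunk_rel, Snapshot snapshot,
								  Tuplesortstate *tuplesortstate)
{
	TupleTableSlot *slot = table_slot_create(uncompressed_chunk_rel, NULL);
	TableScanDesc scan = table_beginscan(uncompressed_chunk_rel, snapshot, 0, NULL);

	/* Tell the Hypercore TAM to skip tuples from the compressed relation. */
	hypercore_scan_set_skip_compressed(scan, true);

	while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
		tuplesort_puttupleslot(tuplesortstate, slot);

	table_endscan(scan);
	ExecDropSingleTupleTableSlot(slot);
}
```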
25 changes: 18 additions & 7 deletions tsl/src/compression/arrow_c_data_interface.h
@@ -192,25 +192,36 @@ arrow_combine_validity(size_t num_words, uint64 *restrict storage, const uint64
{
/*
* Any and all of the filters can be null. For simplicity, move the non-null
* filters to the front.
* filters to the leading positions.
*/
const uint64 *tmp;
#define SWAP(X, Y) \
tmp = (X); \
(X) = (Y); \
(Y) = tmp;

if (filter2 == NULL)
{
SWAP(filter2, filter3);
}

if (filter1 == NULL)
{
SWAP(filter1, filter2);
/*
* We have at least one NULL that goes to the last position.
*/
SWAP(filter1, filter3);

if (filter1 == NULL)
{
/*
* We have another NULL that goes to the second position.
*/
SWAP(filter1, filter2);
}
}
else
{
if (filter2 == NULL)
{
/*
* We have at least one NULL that goes to the last position.
*/
SWAP(filter2, filter3);
}
}
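For context on the swap logic above: `arrow_combine_validity` ANDs up to three validity bitmaps into the caller-provided storage, where a NULL filter means "all rows pass", and the swaps guarantee that any NULL pointers end up in the trailing positions. The standalone sketch below illustrates that contract; it is not the actual function body, and its return-value conventions are assumed rather than taken from the source:

```c
#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative only: combine up to three validity bitmaps with bitwise AND.
 * A NULL filter means "all rows valid". Because the swaps in the diff move
 * non-NULL filters to the leading positions, NULLs can only appear at the end.
 */
static const uint64_t *
combine_validity_sketch(size_t num_words, uint64_t *restrict storage, const uint64_t *filter1,
						const uint64_t *filter2, const uint64_t *filter3)
{
	if (filter1 == NULL)
		return NULL; /* no filters at all: everything is valid */

	if (filter2 == NULL)
		return filter1; /* a single filter can be used as-is, no copy needed */

	for (size_t i = 0; i < num_words; i++)
		storage[i] = filter1[i] & filter2[i] & (filter3 != NULL ? filter3[i] : ~UINT64_C(0));

	return storage;
}
```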