Disable custom hashagg code

In the past, PostgreSQL would not create hashagg plans in some cases due to costing, so TimescaleDB shipped its own code to add them. This was fixed upstream a couple of major versions ago, and the custom code is no longer required. This patch does not remove the code yet, but hides it behind a GUC (disabled by default) to build confidence that it is safe to remove in a later version.
svenklemm committed Nov 11, 2024
1 parent b1aa565 commit 43e1bdb
Showing 13 changed files with 521 additions and 584 deletions.
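For anyone who still needs the old planner behavior, the code path can be re-enabled through the new setting. A minimal sketch, assuming the usual timescaledb. prefix that MAKE_EXTOPTION applies to extension GUCs:

-- Re-enable the custom hash aggregation planner path for this session.
-- The GUC is registered as PGC_USERSET, so no special privileges are needed.
SET timescaledb.enable_custom_hashagg = on;

-- Verify the current value.
SHOW timescaledb.enable_custom_hashagg;

-- Return to the new default (custom hashagg disabled).
RESET timescaledb.enable_custom_hashagg;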
1 change: 1 addition & 0 deletions .unreleased/pr_7390
@@ -0,0 +1 @@
Implements: #7390 Disable custom hashagg planner code
12 changes: 12 additions & 0 deletions src/guc.c
@@ -135,6 +135,7 @@ TSDLLEXPORT bool ts_guc_enable_compression_wal_markers = false;
TSDLLEXPORT bool ts_guc_enable_decompression_sorted_merge = true;
bool ts_guc_enable_chunkwise_aggregation = true;
bool ts_guc_enable_vectorized_aggregation = true;
bool ts_guc_enable_custom_hashagg = false;
TSDLLEXPORT bool ts_guc_enable_compression_indexscan = false;
TSDLLEXPORT bool ts_guc_enable_bulk_decompression = true;
TSDLLEXPORT bool ts_guc_auto_sparse_indexes = true;
@@ -759,6 +760,17 @@ _guc_init(void)
NULL,
NULL);

DefineCustomBoolVariable(MAKE_EXTOPTION("enable_custom_hashagg"),
"Enable custom hash aggregation",
"Enable creating custom hash aggregation plans",
&ts_guc_enable_custom_hashagg,
false,
PGC_USERSET,
0,
NULL,
NULL,
NULL);

DefineCustomBoolVariable(MAKE_EXTOPTION("enable_vectorized_aggregation"),
"Enable vectorized aggregation",
"Enable vectorized aggregation for compressed data",
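Because the variable is registered with PGC_USERSET, it can also be persisted beyond a single session. A hedged sketch; the database and role names here are hypothetical:

-- Apply the setting to every new connection to a given database.
ALTER DATABASE mydb SET timescaledb.enable_custom_hashagg = on;

-- Or scope it to a single role, e.g. a legacy reporting workload.
ALTER ROLE reporting_user SET timescaledb.enable_custom_hashagg = on;
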
1 change: 1 addition & 0 deletions src/guc.h
@@ -44,6 +44,7 @@ extern TSDLLEXPORT bool ts_guc_enable_decompression_sorted_merge;
extern TSDLLEXPORT bool ts_guc_enable_skip_scan;
extern TSDLLEXPORT bool ts_guc_enable_chunkwise_aggregation;
extern TSDLLEXPORT bool ts_guc_enable_vectorized_aggregation;
extern TSDLLEXPORT bool ts_guc_enable_custom_hashagg;
extern bool ts_guc_restoring;
extern int ts_guc_max_open_chunks_per_insert;
extern int ts_guc_max_cached_chunks_per_hypertable;
4 changes: 4 additions & 0 deletions src/planner/add_hashagg.c
@@ -142,6 +142,10 @@ ts_plan_add_hashagg(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *output
PathTarget *target = root->upper_targets[UPPERREL_GROUP_AGG];
bool try_parallel_aggregation;

/* Custom HashAgg is disabled by default */
if (!ts_guc_enable_custom_hashagg)
return;

if (parse->groupingSets || !parse->hasAggs || parse->groupClause == NIL)
return;

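With this guard in place, ts_plan_add_hashagg returns before contributing any paths, so grouped queries fall back to stock PostgreSQL aggregate planning. A sketch of how to compare the two behaviors; the hypertable metrics is hypothetical:

-- Default: the custom path is skipped and PostgreSQL plans the aggregation.
EXPLAIN (COSTS OFF)
SELECT time_bucket('1 minute', ts), count(*)
FROM metrics
GROUP BY 1;

-- Opt back in and compare the resulting plan shape.
SET timescaledb.enable_custom_hashagg = on;
EXPLAIN (COSTS OFF)
SELECT time_bucket('1 minute', ts), count(*)
FROM metrics
GROUP BY 1;
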
74 changes: 38 additions & 36 deletions test/expected/parallel-14.out
@@ -70,21 +70,22 @@ FROM "test"
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts)
-> Result
-> Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
(12 rows)
QUERY PLAN
---------------------------------------------------------------------------------------------
Limit
-> Finalize GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(13 rows)

-- test single copy parallel plan with parallel chunk append
:PREFIX SELECT time_bucket('1 second', ts) sec, last(i, j)
@@ -93,27 +94,28 @@ WHERE length(version()) > 0
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, test.ts)
-> Result
One-Time Filter: (length(version()) > 0)
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_2_chunk
(18 rows)
QUERY PLAN
---------------------------------------------------------------------------------------
Limit
-> Finalize GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(19 rows)

SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
74 changes: 38 additions & 36 deletions test/expected/parallel-15.out
@@ -70,21 +70,22 @@ FROM "test"
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts)
-> Result
-> Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
(12 rows)
QUERY PLAN
---------------------------------------------------------------------------------------------
Limit
-> Finalize GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(13 rows)

-- test single copy parallel plan with parallel chunk append
:PREFIX SELECT time_bucket('1 second', ts) sec, last(i, j)
@@ -93,28 +94,29 @@ WHERE length(version()) > 0
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, test.ts)
-> Result
QUERY PLAN
---------------------------------------------------------------------------------------------
Limit
-> Finalize GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Result
One-Time Filter: (length(version()) > 0)
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_2_chunk
(19 rows)
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(20 rows)

SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
74 changes: 38 additions & 36 deletions test/expected/parallel-16.out
@@ -70,21 +70,22 @@ FROM "test"
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts)
-> Result
-> Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
(12 rows)
QUERY PLAN
---------------------------------------------------------------------------------------------
Limit
-> Finalize GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(13 rows)

-- test single copy parallel plan with parallel chunk append
:PREFIX SELECT time_bucket('1 second', ts) sec, last(i, j)
@@ -93,28 +94,29 @@ WHERE length(version()) > 0
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, test.ts)
-> Result
QUERY PLAN
---------------------------------------------------------------------------------------------
Limit
-> Finalize GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> Result
One-Time Filter: (length(version()) > 0)
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_2_chunk
(19 rows)
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(20 rows)

SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"