Skip to content

Commit

Permalink
Removed dead code
Browse files Browse the repository at this point in the history
  • Loading branch information
jnidzwetzki committed Aug 29, 2023
1 parent ee558c1 commit 09d0bb4
Show file tree
Hide file tree
Showing 4 changed files with 47 additions and 56 deletions.
71 changes: 15 additions & 56 deletions src/planner/partialize.c
Original file line number Diff line number Diff line change
Expand Up @@ -550,57 +550,20 @@ generate_partial_agg_pushdown_path(PlannerInfo *root, Path *cheapest_partial_pat

Assert(subpath->parallel_safe);

/* Check if we have an append path under an append path (e.g., a partially compressed
* chunk)*/
List *subsubpaths = get_subpaths_from_append_path(subpath, false);

if (subsubpaths != NIL)
{
List *sorted_subsubpaths = NIL;
List *hashed_subsubpaths = NIL;

ListCell *lc2;
foreach (lc2, subsubpaths)
{
Path *subsubpath = lfirst(lc2);

add_partially_aggregated_subpaths(root,
cheapest_partial_path,
partial_grouping_target,
d_num_groups,
extra_data,
can_sort,
can_hash,
subsubpath,
&sorted_subsubpaths /* Result path */,
&hashed_subsubpaths /* Result path */);
}

if (can_sort)
{
sorted_subpaths = lappend(sorted_subpaths,
copy_append_like_path(root, subpath, sorted_subsubpaths));
}

if (can_hash)
{
hashed_subpaths = lappend(hashed_subpaths,
copy_append_like_path(root, subpath, hashed_subsubpaths));
}
}
else
{
add_partially_aggregated_subpaths(root,
cheapest_partial_path,
partial_grouping_target,
d_num_groups,
extra_data,
can_sort,
can_hash,
subpath,
&sorted_subpaths /* Result paths */,
&hashed_subpaths /* Result paths */);
}
/* There should be no nested append paths in the partial paths to construct the upper
* relation */
Assert(get_subpaths_from_append_path(subpath, false) == NIL);

add_partially_aggregated_subpaths(root,
cheapest_partial_path,
partial_grouping_target,
d_num_groups,
extra_data,
can_sort,
can_hash,
subpath,
&sorted_subpaths /* Result paths */,
&hashed_subpaths /* Result paths */);
}

/* Create new append paths */
Expand Down Expand Up @@ -705,7 +668,7 @@ ts_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_rel
if (!parse->hasAggs)
return;

/* We can only perform a partial partitionwise aggregation, if no grouping is performed */
/* Grouping sets are not supported by the partial aggregation pushdown */
if (parse->groupingSets)
return;

Expand All @@ -721,10 +684,6 @@ ts_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_rel
!ts_is_gapfill_path(linitial(output_rel->pathlist)) &&
ts_guc_enable_partitionwise_hashed_aggregation;

/* No sorted or hashed aggregation possible, nothing to do for us */
if (!can_sort && !can_hash)
return;

Assert(extra != NULL);
GroupPathExtraData *extra_data = (GroupPathExtraData *) extra;

Expand Down
1 change: 1 addition & 0 deletions tsl/src/nodes/decompress_chunk/decompress_chunk.c
Original file line number Diff line number Diff line change
Expand Up @@ -892,6 +892,7 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
NIL,
path->rows + uncompressed_path->rows);
}

add_partial_path(chunk_rel, path);
}
/* the chunk_rel now owns the paths, remove them from the compressed_rel so they can't be
Expand Down
27 changes: 27 additions & 0 deletions tsl/test/expected/agg_partials_pushdown.out
Original file line number Diff line number Diff line change
Expand Up @@ -437,6 +437,33 @@ SELECT timeCustom t, min(series_0) FROM PUBLIC.testtable2 GROUP BY t ORDER BY t
Worker 0: actual rows=4 loops=1
(49 rows)

-- Test that we don't process groupingSets
:PREFIX
SELECT timeCustom t, min(series_0) FROM PUBLIC.testtable2 GROUP BY ROLLUP(t);
QUERY PLAN
-------------------------------------------------------------------------------------------------------
MixedAggregate (actual rows=7 loops=1)
Output: _hyper_3_5_chunk.timecustom, min(_hyper_3_5_chunk.series_0)
Hash Key: _hyper_3_5_chunk.timecustom
Group Key: ()
Batches: 1
-> Gather (actual rows=11 loops=1)
Output: _hyper_3_5_chunk.timecustom, _hyper_3_5_chunk.series_0
Workers Planned: 2
Workers Launched: 2
-> Parallel Append (actual rows=4 loops=3)
Worker 0: actual rows=0 loops=1
Worker 1: actual rows=0 loops=1
-> Parallel Seq Scan on _timescaledb_internal._hyper_3_5_chunk (actual rows=7 loops=1)
Output: _hyper_3_5_chunk.timecustom, _hyper_3_5_chunk.series_0
-> Parallel Seq Scan on _timescaledb_internal._hyper_3_6_chunk (actual rows=1 loops=1)
Output: _hyper_3_6_chunk.timecustom, _hyper_3_6_chunk.series_0
-> Parallel Seq Scan on _timescaledb_internal._hyper_3_7_chunk (actual rows=2 loops=1)
Output: _hyper_3_7_chunk.timecustom, _hyper_3_7_chunk.series_0
-> Parallel Seq Scan on _timescaledb_internal._hyper_3_8_chunk (actual rows=1 loops=1)
Output: _hyper_3_8_chunk.timecustom, _hyper_3_8_chunk.series_0
(20 rows)

-- Check parallel fallback into a non-partial aggregation
SET timescaledb.partitionwise_plain_sorted_aggregation = OFF;
SET timescaledb.partitionwise_hashed_aggregation = OFF;
Expand Down
4 changes: 4 additions & 0 deletions tsl/test/sql/agg_partials_pushdown.sql
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,10 @@ SELECT timeCustom t, min(series_0) FROM PUBLIC.testtable2 GROUP BY t ORDER BY t
:PREFIX
SELECT timeCustom t, min(series_0) FROM PUBLIC.testtable2 GROUP BY t ORDER BY t DESC NULLS LAST limit 2;

-- Test that we don't process groupingSets
:PREFIX
SELECT timeCustom t, min(series_0) FROM PUBLIC.testtable2 GROUP BY ROLLUP(t);

-- Check parallel fallback into a non-partial aggregation
SET timescaledb.partitionwise_plain_sorted_aggregation = OFF;
SET timescaledb.partitionwise_hashed_aggregation = OFF;
Expand Down

0 comments on commit 09d0bb4

Please sign in to comment.