
Commit 0f0c14e

vector agg memory test is fixed now
akuzm committed Dec 5, 2024
1 parent 62f0610 commit 0f0c14e
Showing 3 changed files with 16 additions and 31 deletions.
2 changes: 1 addition & 1 deletion tsl/src/nodes/decompress_chunk/decompress_chunk.c
@@ -1007,7 +1007,7 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, con
 compressed_path->pathkeys))
 {
     /*
-     * The decompressed path already has the required ordering.
+     * The compressed path already has the required ordering.
      */
     DecompressChunkPath *path = (DecompressChunkPath *) chunk_path_no_sort;
     path->reverse = sort_info.reverse;
34 changes: 12 additions & 22 deletions tsl/test/expected/vector_agg_memory.out
@@ -72,34 +72,26 @@ select * from log where (
 truncate log;
 set max_parallel_workers_per_gather = 0;
 set timescaledb.debug_require_vector_agg = 'require';
--- Despite the tweaks above, we are unable to force the HashAggregation, because
--- the unsorted DecompressChunk paths for aggregation are not created properly
--- (see issue #6836). Limit the memory consumed by tuplesort.
-set work_mem = '64kB';
+-- We should reliably see HashAggregate here because of the tweaks we made above.
 explain (costs off) select ts_debug_allocated_bytes() bytes,
 count(*) a, count(t) b, sum(t) c, avg(t) d, min(t) e, max(t) f
 from mvagg where t >= -1 and t < 1000000 group by s1;
-                                                                  QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------
- Finalize GroupAggregate
+                                                               QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------
+ Finalize HashAggregate
   Group Key: _hyper_1_1_chunk.s1
-   ->  Merge Append
-         Sort Key: _hyper_1_1_chunk.s1
+   ->  Append
         ->  Custom Scan (VectorAgg)
               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk
                     Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000))
-                    ->  Sort
-                          Sort Key: compress_hyper_2_3_chunk.s1
-                          ->  Seq Scan on compress_hyper_2_3_chunk
-                                Filter: ((_ts_meta_max_1 >= '-1'::integer) AND (_ts_meta_min_1 < 1000000))
+                    ->  Seq Scan on compress_hyper_2_3_chunk
+                          Filter: ((_ts_meta_max_1 >= '-1'::integer) AND (_ts_meta_min_1 < 1000000))
         ->  Custom Scan (VectorAgg)
               ->  Custom Scan (DecompressChunk) on _hyper_1_2_chunk
                     Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000))
-                    ->  Sort
-                          Sort Key: compress_hyper_2_4_chunk.s1
-                          ->  Index Scan using compress_hyper_2_4_chunk_s0_s1__ts_meta_min_1__ts_meta_max__idx on compress_hyper_2_4_chunk
-                                Index Cond: ((_ts_meta_min_1 < 1000000) AND (_ts_meta_max_1 >= '-1'::integer))
-(18 rows)
+                    ->  Index Scan using compress_hyper_2_4_chunk_s0_s1__ts_meta_min_1__ts_meta_max__idx on compress_hyper_2_4_chunk
+                          Index Cond: ((_ts_meta_min_1 < 1000000) AND (_ts_meta_max_1 >= '-1'::integer))
+(13 rows)
 
 \set ECHO none
 reset timescaledb.debug_require_vector_agg;
@@ -108,10 +100,8 @@ reset work_mem;
 select * from log where (
 -- For aggregation by segmentby, memory usage should be constant regardless
 -- of the number of tuples. Still, we have to allow for small variations
--- that can be caused by other reasons. Currently the major increase is
--- caused by tuplesort, because we are unable to force hash aggregation due
--- to unrelated planning bugs.
-select regr_slope(bytes, n) > 0.05 from log
+-- that can be caused by other reasons.
+select regr_slope(bytes, n) > 0.01 from log
 );
  n | bytes | a | b | c | d | e | f
 ---+-------+---+---+---+---+---+---
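
The new expected output above asserts that grouping by the segmentby column s1 now plans as a hash aggregate over the vectorized per-chunk scans, with no Sort (and therefore no tuplesort memory) in the plan. A minimal way to check this interactively might look like the following sketch; the identifiers (mvagg, s1, t, the GUCs) are taken from the test above, and the sketch is illustrative rather than part of the commit:

    -- Illustrative sketch: with the planner fix, the aggregate should plan as a
    -- hash aggregate without any work_mem tweak to suppress tuplesort.
    set max_parallel_workers_per_gather = 0;
    set timescaledb.debug_require_vector_agg = 'require';
    explain (costs off)
    select count(*) from mvagg where t >= -1 and t < 1000000 group by s1;
    -- Expected shape: Finalize HashAggregate over an Append of
    -- Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) nodes, no Sort.
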
11 changes: 3 additions & 8 deletions tsl/test/sql/vector_agg_memory.sql
@@ -63,11 +63,8 @@ select * from log where (
 truncate log;
 set max_parallel_workers_per_gather = 0;
 set timescaledb.debug_require_vector_agg = 'require';
--- Despite the tweaks above, we are unable to force the HashAggregation, because
--- the unsorted DecompressChunk paths for aggregation are not created properly
--- (see issue #6836). Limit the memory consumed by tuplesort.
-set work_mem = '64kB';
 
+-- We should reliably see HashAggregate here because of the tweaks we made above.
 explain (costs off) select ts_debug_allocated_bytes() bytes,
 count(*) a, count(t) b, sum(t) c, avg(t) d, min(t) e, max(t) f
 from mvagg where t >= -1 and t < 1000000 group by s1;
@@ -90,10 +87,8 @@ reset work_mem;
 select * from log where (
 -- For aggregation by segmentby, memory usage should be constant regardless
 -- of the number of tuples. Still, we have to allow for small variations
--- that can be caused by other reasons. Currently the major increase is
--- caused by tuplesort, because we are unable to force hash aggregation due
--- to unrelated planning bugs.
-select regr_slope(bytes, n) > 0.05 from log
+-- that can be caused by other reasons.
+select regr_slope(bytes, n) > 0.01 from log
 );
 
 reset timescaledb.debug_require_vector_agg;
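
The pass condition changed in both hunks above is essentially a linear-regression check on the memory log: for aggregation grouped by a segmentby column, allocated memory should not grow with the number of tuples processed. A sketch of the idea, reusing the log(n, bytes) columns from the test (illustrative only):

    -- regr_slope(bytes, n) estimates the extra bytes allocated per additional
    -- tuple processed. For constant-memory aggregation the slope should be
    -- close to zero, so the test now fails on anything above 0.01; the old
    -- 0.05 threshold existed to tolerate the tuplesort memory that the
    -- GroupAggregate plan needed before this fix.
    select regr_slope(bytes, n) as bytes_per_tuple,
           regr_slope(bytes, n) > 0.01 as grows_with_input
    from log;
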
