Use processed group clauses in PG16
PostgreSQL 16 introduced an optimization that removes redundant
grouping and DISTINCT columns. This optimization needs to be taken
into account when generating pushdown aggregation plans so that we
create valid plans with correct grouping columns.
antekresic committed Dec 5, 2023
1 parent 2a5552d commit c81ae6f
Showing 10 changed files with 3,654 additions and 7 deletions.
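
For context, PostgreSQL 16's planner removes grouping and DISTINCT columns it can prove redundant, such as columns equated to a constant in the WHERE clause, and stores the pruned list in root->processed_groupClause instead of rewriting parse->groupClause in place. A minimal sketch of the visible effect, assuming a hypothetical table metrics(device int, bucket timestamptz, temp float8):

-- Hypothetical illustration; table and column names are not from this commit.
-- device is fixed to a constant by the qual, so PG16 can drop it from the
-- grouping keys while still returning it in the target list.
EXPLAIN (COSTS OFF)
SELECT device, bucket, avg(temp)
FROM metrics
WHERE device = 1
GROUP BY device, bucket;
-- Expected Group Key on PG16:            bucket
-- Expected Group Key on PG15 and older:  device, bucket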
1 change: 1 addition & 0 deletions .unreleased/fix_6377
@@ -0,0 +1 @@
+Fixes: #6377 Use processed group clauses in PG16
20 changes: 17 additions & 3 deletions src/planner/partialize.c
@@ -352,7 +352,11 @@ create_sorted_partial_agg_path(PlannerInfo *root, Path *path, PathTarget *target
 					target,
 					parse->groupClause ? AGG_SORTED : AGG_PLAIN,
 					AGGSPLIT_INITIAL_SERIAL,
+#if PG16_LT
 					parse->groupClause,
+#else
+					root->processed_groupClause,
+#endif
 					NIL,
 					agg_partial_costs,
 					d_num_groups);
@@ -367,8 +371,6 @@ static AggPath *
 create_hashed_partial_agg_path(PlannerInfo *root, Path *path, PathTarget *target,
 							   double d_num_groups, GroupPathExtraData *extra_data)
 {
-	Query *parse = root->parse;
-
 	/* Determine costs for aggregations */
 	AggClauseCosts *agg_partial_costs = &extra_data->agg_partial_costs;
 
@@ -378,7 +380,11 @@ create_hashed_partial_agg_path(PlannerInfo *root, Path *path, PathTarget *target
 					target,
 					AGG_HASHED,
 					AGGSPLIT_INITIAL_SERIAL,
-					parse->groupClause,
+#if PG16_LT
+					root->parse->groupClause,
+#else
+					root->processed_groupClause,
+#endif
 					NIL,
 					agg_partial_costs,
 					d_num_groups);
@@ -890,7 +896,11 @@ ts_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_rel
 				grouping_target,
 				parse->groupClause ? AGG_SORTED : AGG_PLAIN,
 				AGGSPLIT_FINAL_DESERIAL,
+#if PG16_LT
 				parse->groupClause,
+#else
+				root->processed_groupClause,
+#endif
 				(List *) parse->havingQual,
 				agg_final_costs,
 				d_num_groups));
@@ -904,7 +914,11 @@ ts_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_rel
 				grouping_target,
 				AGG_HASHED,
 				AGGSPLIT_FINAL_DESERIAL,
+#if PG16_LT
 				parse->groupClause,
+#else
+				root->processed_groupClause,
+#endif
 				(List *) parse->havingQual,
 				agg_final_costs,
 				d_num_groups));
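
All four call sites follow the same pattern. On PG15 and earlier, preprocess_groupclause() rewrote parse->groupClause in place, so passing it to create_agg_path() picked up the preprocessed list. PG16 leaves the query tree untouched and keeps the preprocessed (pruned, possibly reordered) list only in root->processed_groupClause, so building Agg paths from parse->groupClause could group on columns the optimizer has already removed. The same PG16 preprocessing covers DISTINCT; a sketch, reusing the hypothetical metrics table from above:

-- Hypothetical illustration: PG16 keeps the analogous pruned list in
-- root->processed_distinctClause, so with device pinned by the qual the
-- expectation is that de-duplication happens on bucket alone.
EXPLAIN (COSTS OFF)
SELECT DISTINCT device, bucket
FROM metrics
WHERE device = 1;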
906 changes: 906 additions & 0 deletions test/expected/partitionwise-13.out

906 changes: 906 additions & 0 deletions test/expected/partitionwise-14.out

906 changes: 906 additions & 0 deletions test/expected/partitionwise-15.out

904 changes: 904 additions & 0 deletions test/expected/partitionwise-16.out
1 change: 1 addition & 0 deletions test/sql/.gitignore
@@ -9,6 +9,7 @@
 /insert_many-*.sql
 /parallel-*.sql
 /partitioning-*.sql
+/partitionwise-*.sql
 /plan_expand_hypertable-*.sql
 /plan_hashagg-*.sql
 /plan_hashagg_optimized-*.sql
2 changes: 1 addition & 1 deletion test/sql/CMakeLists.txt
@@ -35,7 +35,6 @@ set(TEST_FILES
 	null_exclusion.sql
 	partition.sql
 	partitioning.sql
-	partitionwise.sql
 	pg_dump_unprivileged.sql
 	pg_join.sql
 	plain.sql
@@ -68,6 +67,7 @@ set(TEST_TEMPLATES
 	rowsecurity.sql.in
 	update.sql.in
 	parallel.sql.in
+	partitionwise.sql.in
 	plan_expand_hypertable.sql.in
 	plan_ordered_append.sql.in
 	query.sql.in
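
Moving partitionwise.sql from TEST_FILES to TEST_TEMPLATES makes the test runner generate a per-major-version copy of the test, which is why the four version-specific expected files (partitionwise-13/14/15/16.out) appear above and why the generated partitionwise-*.sql files are now ignored in test/sql/.gitignore.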
9 changes: 9 additions & 0 deletions test/sql/partitionwise.sql → test/sql/partitionwise.sql.in
@@ -273,3 +273,12 @@ FROM hyper_timepart
 GROUP BY 1, 2
 ORDER BY 1, 2
 LIMIT 10;
+
+-- Test removal of redundant group key optimization in PG16
+-- All lower versions include the redundant key on device column
+:PREFIX
+SELECT device, avg(temp)
+FROM hyper_timepart
+WHERE device = 1
+GROUP BY 1
+LIMIT 10;
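
This is the regression test for the fix: device is pinned by the qual and is the only grouping column, so on PG16 the processed group clause differs from parse->groupClause, and the per-version expected files capture the difference (note partitionwise-16.out is two lines shorter than the others). :PREFIX is a psql variable set earlier in the test (not shown in this diff), roughly:

-- Illustrative only; the actual value is defined elsewhere in the suite.
-- \set PREFIX 'EXPLAIN (costs off)'
-- :PREFIX then expands in front of the query so the emitted plan can be
-- compared against the version-specific expected output.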
6 changes: 3 additions & 3 deletions tsl/test/expected/merge_append_partially_compressed-16.out
@@ -702,7 +702,7 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
 ---------------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
    ->  Finalize GroupAggregate (actual rows=5 loops=1)
-         Group Key: test1.x1, test1.x2, test1."time"
+         Group Key: test1."time", test1.x1, test1.x2
          ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
                Order: test1."time", test1.x1, test1.x2
                ->  Merge Append (actual rows=5 loops=1)
@@ -711,15 +711,15 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
                      ->  Partial HashAggregate (actual rows=4 loops=1)
-                           Group Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk."time"
+                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                            Batches: 1
                            ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
                                  ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
                ->  Sort (actual rows=1 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
                      ->  Partial HashAggregate (actual rows=1 loops=1)
-                           Group Key: _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2, _hyper_3_7_chunk."time"
+                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                            Batches: 1
                            ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (22 rows)
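
The flipped Group Key order above is expected rather than a behavior change: preprocess_groupclause() moves GROUP BY items that match the ORDER BY to the front, and on PG16 that reordered list is visible only through root->processed_groupClause, which this commit now passes to the Agg paths. Query results are identical; only the displayed key order moves:

-- The query under test in this file; grouping output is the same either
-- way, but with processed_groupClause the plan shows "time" first.
SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;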
