Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Do not use squashing for materialized views (fixes excessive memory usage) #34908

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/Processors/Transforms/buildPushingToViewsChain.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,11 @@ Chain buildPushingToViewsChain(
insert_columns.emplace_back(column.name);
}

InterpreterInsertQuery interpreter(nullptr, insert_context, false, false, false);
/// Create interpreter w/o squashing
/// since we have squashing (SquashingChunksTransform) for each block.
InterpreterInsertQuery interpreter(nullptr, insert_context,
/* allow_materialized_= */ false,
/* no_squash_= */ true);
out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, view_thread_status, view_counter_ms);
out.addStorageHolder(dependent_table);
out.addStorageHolder(inner_table);
Expand Down
Empty file.
18 changes: 18 additions & 0 deletions tests/queries/0_stateless/02223_mv_aggregator_leak.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
-- Regression test for excessive memory usage when pushing to a materialized
-- view: inserts through the MV chain must not be squashed into huge blocks,
-- otherwise the uniqExact aggregator state grows beyond max_memory_usage.

DROP TABLE IF EXISTS mv_02223;
DROP TABLE IF EXISTS in_02223;
DROP TABLE IF EXISTS out_02223;

CREATE TABLE in_02223 (key UInt64) ENGINE = Null();
CREATE TABLE out_02223 (keys AggregateFunction(uniqExact, String)) ENGINE = Null();

CREATE MATERIALIZED VIEW mv_02223 TO out_02223 AS
SELECT uniqExactState(toString(key)) AS keys
FROM in_02223
GROUP BY intDiv(key, 1000);

-- SET is used here (instead of SETTINGS on the INSERT) so the test can also
-- be run against 19.x, since the issue was introduced in 19.11.12.69
-- (https://github.com/ClickHouse/ClickHouse/pull/3796).
SET max_memory_usage = 200000000;
INSERT INTO in_02223 SELECT number FROM numbers(10000000);

DROP TABLE mv_02223;
DROP TABLE out_02223;
DROP TABLE in_02223;