Expose Peak memory usage in query statistics. #51946

Merged: 21 commits, Jul 25, 2023
Changes from 13 commits
2 changes: 2 additions & 0 deletions src/Client/ClientBase.cpp
@@ -1191,6 +1191,8 @@ void ClientBase::onProfileEvents(Block & block)
thread_times[host_name].system_ms = value;
else if (event_name == MemoryTracker::USAGE_EVENT_NAME)
thread_times[host_name].memory_usage = value;
else if (event_name == MemoryTracker::PEAK_USAGE_EVENT_NAME)
thread_times[host_name].peak_memory_usage = value;
}
progress_indication.updateThreadEventData(thread_times);

1 change: 1 addition & 0 deletions src/Common/MemoryTracker.h
@@ -95,6 +95,7 @@ class MemoryTracker
public:

static constexpr auto USAGE_EVENT_NAME = "MemoryTrackerUsage";
static constexpr auto PEAK_USAGE_EVENT_NAME = "MemoryTrackerPeakUsage";

explicit MemoryTracker(VariableContext level_ = VariableContext::Thread);
explicit MemoryTracker(MemoryTracker * parent_, VariableContext level_ = VariableContext::Thread);
8 changes: 4 additions & 4 deletions src/Common/ProgressIndication.cpp
@@ -83,7 +83,7 @@ ProgressIndication::MemoryUsage ProgressIndication::getMemoryUsage() const
[](MemoryUsage const & acc, auto const & host_data)
{
UInt64 host_usage = host_data.second.memory_usage;
return MemoryUsage{.total = acc.total + host_usage, .max = std::max(acc.max, host_usage)};
return MemoryUsage{.total = acc.total + host_usage, .max = std::max(acc.max, host_usage), .peak = std::max(acc.peak, host_data.second.peak_memory_usage)};
});
}

@@ -99,8 +99,8 @@ void ProgressIndication::writeFinalProgress()
if (elapsed_ns)
std::cout << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.)";
else
std::cout << ". ";
auto peak_memory_usage = getMemoryUsage().peak;
Member:

I have one concern here: how does a new client work with an old server? The client would print 0 as peak_memory_usage.

Contributor Author:

It seems to me that this entire line is purely informational, and it is unlikely that the client will rely on this value.
Should we try not to display the value for old servers?

Member:

That would be right but heavy. Maybe it is enough simply not to print it when it is zero, since memory usage is never actually zero.

Contributor Author:

Added a check (the value is not shown for old servers). I use '-1' as the default because memory can be zero (I don't know why it works like this; on small queries it is always 0).
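
A minimal sketch of that guard (assuming the peak field becomes signed so that -1 can mean "not reported by the server"; the exact check in the merged commits may differ):

    // In ProgressIndication::writeFinalProgress():
    auto peak_memory_usage = getMemoryUsage().peak;
    /// Old servers never send MemoryTrackerPeakUsage, so the -1 default survives
    /// and the line is skipped instead of printing a misleading 0.
    if (peak_memory_usage >= 0)
        std::cout << ".\nPeak memory usage (for query) "
                  << formatReadableSizeWithBinarySuffix(peak_memory_usage) << ".";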

std::cout << ".\nPeak memory usage (for query) " << formatReadableSizeWithBinarySuffix(peak_memory_usage) << ".";
}

void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
@@ -152,7 +152,7 @@ void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
std::string profiling_msg;

double cpu_usage = getCPUUsage();
auto [memory_usage, max_host_usage] = getMemoryUsage();
auto [memory_usage, max_host_usage, peak_usage] = getMemoryUsage();

if (cpu_usage > 0 || memory_usage > 0)
{
2 changes: 2 additions & 0 deletions src/Common/ProgressIndication.h
@@ -22,6 +22,7 @@ struct ThreadEventData
UInt64 user_ms = 0;
UInt64 system_ms = 0;
UInt64 memory_usage = 0;
UInt64 peak_memory_usage = 0;
};

using HostToTimesMap = std::unordered_map<String, ThreadEventData>;
@@ -64,6 +65,7 @@ class ProgressIndication
{
UInt64 total = 0;
UInt64 max = 0;
UInt64 peak = 0;
};

MemoryUsage getMemoryUsage() const;
14 changes: 9 additions & 5 deletions src/IO/Progress.cpp
@@ -69,12 +69,14 @@ void ProgressValues::write(WriteBuffer & out, UInt64 client_revision) const
}
}

void ProgressValues::writeJSON(WriteBuffer & out) const
void ProgressValues::writeJSON(WriteBuffer & out, bool add_braces) const
{
/// Numbers are written in double quotes (as strings) to avoid loss of precision
/// of 64-bit integers after interpretation by JavaScript.

writeCString("{\"read_rows\":\"", out);
if (add_braces)
writeCString("{", out);
writeCString("\"read_rows\":\"", out);
writeText(read_rows, out);
writeCString("\",\"read_bytes\":\"", out);
writeText(read_bytes, out);
@@ -88,7 +90,9 @@ void ProgressValues::writeJSON(WriteBuffer & out) const
writeText(result_rows, out);
writeCString("\",\"result_bytes\":\"", out);
writeText(result_bytes, out);
writeCString("\"}", out);
writeCString("\"", out);
if (add_braces)
writeCString("}", out);
}

bool Progress::incrementPiecewiseAtomically(const Progress & rhs)
@@ -230,9 +234,9 @@ void Progress::write(WriteBuffer & out, UInt64 client_revision) const
getValues().write(out, client_revision);
}

void Progress::writeJSON(WriteBuffer & out) const
void Progress::writeJSON(WriteBuffer & out, bool add_braces) const
{
getValues().writeJSON(out);
getValues().writeJSON(out, add_braces);
}

}
4 changes: 2 additions & 2 deletions src/IO/Progress.h
@@ -32,7 +32,7 @@ struct ProgressValues

void read(ReadBuffer & in, UInt64 server_revision);
void write(WriteBuffer & out, UInt64 client_revision) const;
void writeJSON(WriteBuffer & out) const;
void writeJSON(WriteBuffer & out, bool add_braces = true) const;
Member:

Default values are harmful.
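
For context, the concern is that a defaulted add_braces lets every existing and future call site silently keep the braced behaviour. A hedged sketch of the alternative (not what this PR does) is to drop the default and make intent explicit at each caller:

    /// No default: every caller states whether it wants the surrounding braces.
    void writeJSON(WriteBuffer & out, bool add_braces) const;

    // Ordinary callers keep the old, self-contained JSON object:
    progress.writeJSON(out, /*add_braces=*/ true);
    // The HTTP summary path, which appends peak_memory_usage itself, opts out:
    progress.writeJSON(out, /*add_braces=*/ false);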

};

struct ReadProgress
@@ -118,7 +118,7 @@ struct Progress
void write(WriteBuffer & out, UInt64 client_revision) const;

/// Progress in JSON format (single line, without whitespaces) is used in HTTP headers.
void writeJSON(WriteBuffer & out) const;
void writeJSON(WriteBuffer & out, bool add_braces = true) const;

/// Each value separately is changed atomically (but not whole object).
bool incrementPiecewiseAtomically(const Progress & rhs);
12 changes: 10 additions & 2 deletions src/Interpreters/ProfileEventsExt.cpp
@@ -86,9 +86,16 @@ static void dumpMemoryTracker(ProfileEventsSnapshot const & snapshot, DB::Mutabl
columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
columns[i++]->insert(static_cast<UInt64>(snapshot.thread_id));
columns[i++]->insert(Type::GAUGE);

columns[i++]->insertData(MemoryTracker::USAGE_EVENT_NAME, strlen(MemoryTracker::USAGE_EVENT_NAME));
columns[i++]->insert(snapshot.memory_usage);
columns[i]->insert(snapshot.memory_usage);

i = 0;
columns[i++]->insertData(host_name.data(), host_name.size());
columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
columns[i++]->insert(static_cast<UInt64>(snapshot.thread_id));
columns[i++]->insert(Type::GAUGE);
columns[i++]->insertData(MemoryTracker::PEAK_USAGE_EVENT_NAME, strlen(MemoryTracker::PEAK_USAGE_EVENT_NAME));
columns[i]->insert(snapshot.peak_memory_usage);
}
Comment on lines +90 to 99
Member:

Please explain those lines to me; I do not understand them.

Contributor Author:

The data (ProfileEvents and memory events) is passed between the server and the client as rows of an SQL block (DB::Block). I added one more row carrying my data (peak memory usage).
The columns are:

static const NamesAndTypesList column_names_and_types = {
        {"host_name", std::make_shared<DataTypeString>()},
        {"current_time", std::make_shared<DataTypeDateTime>()},
        {"thread_id", std::make_shared<DataTypeUInt64>()},
        {"type", TypeEnum},
        {"name", std::make_shared<DataTypeString>()},
        {"value", std::make_shared<DataTypeInt64>()},
    };

Member:

That is OK.
The diff itself looks strange.

[screenshot of the diff]

Looks like one write was not enough, so you added two :)

Member:

Oh, my bad.
That is right. You add one more row here.

Contributor Author:

I don't understand what exactly is confusing. The first block writes current_memory_usage. insert() appends a new value at the end of a column (the column's size grows by 1). The i++ at the end of each block is unnecessary, so I replaced it with plain i.
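
If it is the duplicated column-by-column block that reads oddly, an equivalent loop would append the same two rows; this is only an illustration, the PR itself keeps two explicit blocks:

    // Each iteration appends one row to the ProfileEvents block: insert()/insertData()
    // push a value onto the end of every column, so the block grows by one row.
    const std::pair<const char *, Int64> extra_rows[] = {
        {MemoryTracker::USAGE_EVENT_NAME, snapshot.memory_usage},
        {MemoryTracker::PEAK_USAGE_EVENT_NAME, snapshot.peak_memory_usage},
    };
    for (const auto & [event_name, value] : extra_rows)
    {
        size_t i = 0;
        columns[i++]->insertData(host_name.data(), host_name.size());
        columns[i++]->insert(static_cast<UInt64>(snapshot.current_time));
        columns[i++]->insert(static_cast<UInt64>(snapshot.thread_id));
        columns[i++]->insert(Type::GAUGE);
        columns[i++]->insertData(event_name, strlen(event_name));
        columns[i]->insert(value);
    }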


void getProfileEvents(
@@ -121,6 +128,7 @@ void getProfileEvents(
group_snapshot.thread_id = 0;
group_snapshot.current_time = time(nullptr);
group_snapshot.memory_usage = thread_group->memory_tracker.get();
group_snapshot.peak_memory_usage = thread_group->memory_tracker.getPeak();
auto group_counters = thread_group->performance_counters.getPartiallyAtomicSnapshot();
auto prev_group_snapshot = last_sent_snapshots.find(0);
group_snapshot.counters =
1 change: 1 addition & 0 deletions src/Interpreters/ProfileEventsExt.h
@@ -16,6 +16,7 @@ struct ProfileEventsSnapshot
UInt64 thread_id;
CountersIncrement counters;
Int64 memory_usage;
Int64 peak_memory_usage;
time_t current_time;
};

14 changes: 12 additions & 2 deletions src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp
@@ -3,7 +3,7 @@
#include <IO/HTTPCommon.h>
#include <IO/Progress.h>
#include <IO/WriteBufferFromString.h>

#include <IO/WriteHelpers.h>

namespace DB
{
@@ -35,7 +35,12 @@ void WriteBufferFromHTTPServerResponse::writeHeaderSummary()
return;

WriteBufferFromOwnString progress_string_writer;
accumulated_progress.writeJSON(progress_string_writer);

writeCString("{", progress_string_writer);
accumulated_progress.writeJSON(progress_string_writer, false);
Member:

This looks like trash.

writeCString(",\"peak_memory_usage\":\"", progress_string_writer);
writeText(peak_memory_usage, progress_string_writer);
writeCString("\"}", progress_string_writer);

if (response_header_ostr)
*response_header_ostr << "X-ClickHouse-Summary: " << progress_string_writer.str() << "\r\n" << std::flush;
@@ -169,6 +174,11 @@ void WriteBufferFromHTTPServerResponse::onProgress(const Progress & progress)
}
}

void WriteBufferFromHTTPServerResponse::onMemoryUsage(Int64 usage)
{
peak_memory_usage = usage;
}

WriteBufferFromHTTPServerResponse::~WriteBufferFromHTTPServerResponse()
{
finalize();
5 changes: 5 additions & 0 deletions src/Server/HTTP/WriteBufferFromHTTPServerResponse.h
@@ -45,6 +45,9 @@ class WriteBufferFromHTTPServerResponse final : public BufferWithOwnMemory<Write
/// Writes progress in repeating HTTP headers.
void onProgress(const Progress & progress);

void onMemoryUsage(Int64 peak_memory_usage);


/// Turn compression on or off.
/// The setting has any effect only if HTTP headers haven't been sent yet.
void setCompression(bool enable_compression)
Expand Down Expand Up @@ -126,6 +129,8 @@ class WriteBufferFromHTTPServerResponse final : public BufferWithOwnMemory<Write

int exception_code = 0;

Int64 peak_memory_usage = 0;

std::mutex mutex; /// progress callback could be called from different threads.
};

8 changes: 7 additions & 1 deletion src/Server/HTTPHandler.cpp
@@ -818,7 +818,13 @@ void HTTPHandler::processQuery(

/// While still no data has been sent, we will report about query execution progress by sending HTTP headers.
/// Note that we add it unconditionally so the progress is available for `X-ClickHouse-Summary`
append_callback([&used_output](const Progress & progress) { used_output.out->onProgress(progress); });
append_callback([&used_output](const Progress & progress)
{
used_output.out->onProgress(progress);
auto thread_group = CurrentThread::getGroup();
auto peak_memory_usage = thread_group->memory_tracker.getPeak();
used_output.out->onMemoryUsage(peak_memory_usage);
Member:

Please clarify for me:

  • when do you want to collect peak_memory_usage for the query
  • when do you want to send it to the client
  • which clients should receive peak_memory_usage, and which are not supposed to see it

The questions arose because I see an inconsistency in the logic, although I might be wrong.
I see that:

  • peak_memory_usage is collected regularly during the query (this is why the data race occurs)
  • it is sent only at the end (only at finishSendHeaders)
  • only the HTTP protocol is affected

Contributor Author:

when do you want to collect peak_memory_usage for the query

It is already collected for the query (that was implemented earlier); I just pass it on over HTTP. I don't know exactly how it works internally. The server already prints this value in its log.

when do you want to send it to the client

Strictly speaking, I only need to report it once at the end, but there is no such single moment here, so I send it along with the progress statistics.

which clients should receive peak_memory_usage and which are not suppose to see it.

I did it for everyone (because I thought it was informational only and no one would be offended). But I don't have exact information on how clients use the X-ClickHouse-Summary field.

(this is why the data race occurs)

It looks like I forgot the mutex; I'll try to fix it now.
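
A minimal sketch of that fix, assuming the existing mutex member of WriteBufferFromHTTPServerResponse is reused (the actual follow-up commit may differ):

    void WriteBufferFromHTTPServerResponse::onMemoryUsage(Int64 usage)
    {
        /// Guard with the same mutex the progress callback already uses,
        /// since onProgress() and the header-writing path run on different threads.
        std::lock_guard lock(mutex);
        peak_memory_usage = usage;
    }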

What does it look like (over curl):

curl -v 'http://localhost:8123/?send_progress_in_http_headers=1&query=SELECT%20number%20FROM%20numbers(100000000)%20FORMAT%20Null;'

...
< Keep-Alive: timeout=10
< X-ClickHouse-Progress: {"read_rows":"3989949","read_bytes":"31919592","written_rows":"0","written_bytes":"0","total_rows_to_read":"20000000","result_rows":"0","result_bytes":"0"}
< X-ClickHouse-Progress: {"read_rows":"8241534","read_bytes":"65932272","written_rows":"0","written_bytes":"0","total_rows_to_read":"20000000","result_rows":"0","result_bytes":"0"}
< X-ClickHouse-Progress: {"read_rows":"12493119","read_bytes":"99944952","written_rows":"0","written_bytes":"0","total_rows_to_read":"20000000","result_rows":"0","result_bytes":"0"}
< X-ClickHouse-Progress: {"read_rows":"16744704","read_bytes":"133957632","written_rows":"0","written_bytes":"0","total_rows_to_read":"20000000","result_rows":"0","result_bytes":"0"}
< X-ClickHouse-Summary: {"read_rows":"20015154","read_bytes":"160121232","written_rows":"0","written_bytes":"0","total_rows_to_read":"20000000","result_rows":"20000000","result_bytes":"160432128","peak_memory_usage":"0"}
....

In the log:
2023.07.14 14:19:56.455771 [ 58613 ] {f97e1eb1-7d9b-44b7-930c-2e9efcef50ca} <Debug> MemoryTracker: Peak memory usage (for query): 0.00 B.

Contributor Author:

only http protocol is affected

The task was also about clickhouse-client, which uses the binary protocol. In general, this work consists of two independent parts: HTTP and clickhouse-client.

Member:

Yes, I see now, sendProfileEvents works for all other protocols. Good.

Member (@CheSema, Jul 14, 2023):

Now I understand why the HTTP changes stand out so much here: it is because ClickHouse doesn't send ProfileEvents over HTTP.

An interesting question, not actually addressed to you: what is the point of sending progress as a header? If ClickHouse is unable to buffer the whole response, then no more headers can be sent after data transmission has started. Maybe ClickHouse really is able to buffer the whole response in most cases.

Here I propose sending peak_memory_usage with each writeHeaderProgress() call as well. It actually makes sense, because one short peak might not show up in the regular memory usage, yet it may still lead to an OOM.
I think that could also make the code more straightforward.
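
A rough sketch of that proposal, assuming writeHeaderProgress() mirrors writeHeaderSummary(); this is not the code that was finally merged:

    // Append peak_memory_usage to every X-ClickHouse-Progress header,
    // not only to the final X-ClickHouse-Summary.
    WriteBufferFromOwnString progress_string_writer;
    writeCString("{", progress_string_writer);
    accumulated_progress.writeJSON(progress_string_writer, /*add_braces=*/ false);
    writeCString(",\"peak_memory_usage\":\"", progress_string_writer);
    writeText(peak_memory_usage, progress_string_writer);
    writeCString("\"}", progress_string_writer);
    if (response_header_ostr)
        *response_header_ostr << "X-ClickHouse-Progress: " << progress_string_writer.str() << "\r\n" << std::flush;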

Member:

What do you think about sending peak_memory_usage with each writeHeaderProgress() call?

In general I like this PR and could approve it as is.
But I think it would be even better to send that info with each progress header, not only the last one.

Contributor Author:

I'm already doing it now (I considered it during development but decided against it at the time). Today I hope to finish it (writeHeaderProgress + do not display the value for old servers).

Contributor Author:

Done (peak memory usage is now sent with the progress headers).

});

if (settings.readonly > 0 && settings.cancel_http_readonly_queries_on_client_close)
{
@@ -46,7 +46,7 @@ ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}" -H 'Accept-Encoding: gzip' -d 'CREAT
result=""
counter=0
while [ $counter -lt $RETRIES ] && [ -z "$result" ]; do
result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&http_headers_progress_interval_ms=0&send_progress_in_http_headers=1" -d 'INSERT INTO insert_number_query (record) SELECT number FROM system.numbers LIMIT 10' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Summary|^[0-9]')
result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&http_headers_progress_interval_ms=0&send_progress_in_http_headers=1" -d 'INSERT INTO insert_number_query (record) SELECT number FROM system.numbers LIMIT 10' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Summary|^[0-9]' | sed 's/,\"peak_mem[^}]*//')
let counter=counter+1
done
echo "$result"
7 changes: 4 additions & 3 deletions tests/queries/0_stateless/01921_test_progress_bar.py
@@ -14,6 +14,7 @@

with client(name="client1>", log=log) as client1:
client1.expect(prompt)
client1.send("SELECT number FROM numbers(100) FORMAT Null")
client1.expect("Progress: 100\.00 rows, 800\.00 B.*" + end_of_block)
client1.expect("0 rows in set. Elapsed: [\\w]{1}\.[\\w]{3} sec." + end_of_block)
client1.send("SELECT number FROM numbers(1000) FORMAT Null")
client1.expect("Progress: 1\.00 thousand rows, 8\.00 KB .*" + end_of_block)
client1.expect("0 rows in set. Elapsed: [\\w]{1}\.[\\w]{3} sec.")
client1.expect("Peak memory usage \(for query\) .*B" + end_of_block)
2 changes: 1 addition & 1 deletion tests/queries/0_stateless/02136_scalar_progress.sh
@@ -4,4 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

$CLICKHOUSE_CURL -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d "SELECT (SELECT max(number), count(number) FROM numbers(100000) settings max_block_size=65505);" -v 2>&1 | grep -E "X-ClickHouse-Summary|X-ClickHouse-Progress"
$CLICKHOUSE_CURL -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d "SELECT (SELECT max(number), count(number) FROM numbers(100000) settings max_block_size=65505);" -v 2>&1 | grep -E "X-ClickHouse-Summary|X-ClickHouse-Progress" | sed 's/,\"peak_mem[^}]*//'
2 changes: 1 addition & 1 deletion tests/queries/0_stateless/02373_progress_contain_result.sh
@@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

echo 'SELECT 1 FROM numbers(100)' |
${CLICKHOUSE_CURL_COMMAND} -v "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=0" --data-binary @- 2>&1 |
grep 'X-ClickHouse-Summary'
grep 'X-ClickHouse-Summary' | sed 's/,\"peak_mem[^}]*//'
12 changes: 6 additions & 6 deletions tests/queries/0_stateless/02423_insert_summary_behaviour.sh
@@ -11,11 +11,11 @@ $CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW floats_to_target TO target_1 AS
$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW floats_to_target_2 TO target_2 AS SELECT * FROM floats, numbers(2) n"

echo "No materialized views"
${CLICKHOUSE_CURL} "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+target_1" -d "VALUES(1.0)" -v 2>&1 | grep 'X-ClickHouse-Summary'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format Native | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+target_1+FORMAT+Native" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format RowBinary | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+target_1+FORMAT+RowBinary" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary'
${CLICKHOUSE_CURL} "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+target_1" -d "VALUES(1.0)" -v 2>&1 | grep 'X-ClickHouse-Summary' | sed 's/,\"peak_mem[^}]*//'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format Native | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+target_1+FORMAT+Native" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary' | sed 's/,\"peak_mem[^}]*//'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format RowBinary | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+target_1+FORMAT+RowBinary" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary' | sed 's/,\"peak_mem[^}]*//'

echo "With materialized views"
${CLICKHOUSE_CURL} "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+floats" -d "VALUES(1.0)" -v 2>&1 | grep 'X-ClickHouse-Summary'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format Native | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+floats+FORMAT+Native" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format RowBinary | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+floats+FORMAT+RowBinary" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary'
${CLICKHOUSE_CURL} "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+floats" -d "VALUES(1.0)" -v 2>&1 | grep 'X-ClickHouse-Summary' | sed 's/,\"peak_mem[^}]*//'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format Native | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+floats+FORMAT+Native" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary' | sed 's/,\"peak_mem[^}]*//'
$CLICKHOUSE_LOCAL -q "SELECT number::Float64 AS v FROM numbers(10)" --format RowBinary | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query=INSERT+INTO+floats+FORMAT+RowBinary" --data-binary @- -v 2>&1 | grep 'X-ClickHouse-Summary' | sed 's/,\"peak_mem[^}]*//'
@@ -5,5 +5,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d @- <<< "insert into function null('_ Int') select * from numbers(5) settings max_block_size=1" -v |& {
grep -F -e X-ClickHouse-Progress: -e X-ClickHouse-Summary:
grep -F -e X-ClickHouse-Progress: -e X-ClickHouse-Summary: | sed 's/,\"peak_mem[^}]*//'
}