diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp
index 1f9ea483084e..8c87c5883eb7 100644
--- a/src/Interpreters/executeQuery.cpp
+++ b/src/Interpreters/executeQuery.cpp
@@ -656,15 +656,17 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
     if (query_span && query_span->trace_id != UUID{})
         LOG_TRACE(&Poco::Logger::get("executeQuery"), "Query span trace_id for opentelemetry log: {}", query_span->trace_id);
 
+    /// Used for logging query start time in system.query_log
     auto query_start_time = std::chrono::system_clock::now();
 
-    /// Used to set the watch in QueryStatus and the output formats. It is not based on query_start_time as that might be based on
-    /// the value passed by the client
+    /// Used for:
+    /// * Setting the watch in QueryStatus (controls timeouts and progress) and the output formats
+    /// * Logging query duration (system.query_log)
     Stopwatch start_watch{CLOCK_MONOTONIC};
 
     const auto & client_info = context->getClientInfo();
 
-    if (!internal)
+    if (!internal && client_info.initial_query_start_time == 0)
     {
         // If it's not an internal query and we don't see an initial_query_start_time yet, initialize it
         // to current time. Internal queries are those executed without an independent client context,
@@ -672,15 +674,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
         // possible to have unset initial_query_start_time for non-internal and non-initial queries. For
         // example, the query is from an initiator that is running an old version of clickhouse.
         // On the other hand, if it's initialized then take it as the start of the query
-        if (client_info.initial_query_start_time == 0)
-        {
-            context->setInitialQueryStartTime(query_start_time);
-        }
-        else
-        {
-            query_start_time = std::chrono::time_point<std::chrono::system_clock>(
-                std::chrono::microseconds{client_info.initial_query_start_time_microseconds});
-        }
+        context->setInitialQueryStartTime(query_start_time);
     }
 
     assert(internal || CurrentThread::get().getQueryContext());
diff --git a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.reference b/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.reference
deleted file mode 100644
index fbce8ae2026f..000000000000
--- a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.reference
+++ /dev/null
@@ -1,8 +0,0 @@
-SELECT
-3 0 0
-3 0 0
-INSERT
-CHECK
-1
-2
-6 0 2
diff --git a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh b/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh
deleted file mode 100755
index 5da643bd17be..000000000000
--- a/tests/queries/0_stateless/02590_interserver_mode_client_info_initial_query_start_time.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env bash
-# Tags: no-fasttest
-# Tag no-fasttest: interserver mode requires SSL
-#
-# Test that checks that some of ClientInfo correctly passed in inter-server mode.
-# NOTE: we need .sh test (.sql is not enough) because queries on remote nodes does not have current_database = currentDatabase()
-#
-# Check-style suppression: select * from system.query_log where current_database = currentDatabase();
-
-CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
-# shellcheck source=../shell_config.sh
-. "$CUR_DIR"/../shell_config.sh
-
-function get_query_id() { random_str 10; }
-
-$CLICKHOUSE_CLIENT -nm -q "
-    drop table if exists buf;
-    drop table if exists dist;
-    drop table if exists data;
-
-    create table data (key Int) engine=Memory();
-    create table dist as data engine=Distributed(test_cluster_interserver_secret, currentDatabase(), data, key);
-    create table dist_dist as data engine=Distributed(test_cluster_interserver_secret, currentDatabase(), dist, key);
-    system stop distributed sends dist;
-"
-
-echo "SELECT"
-query_id="$(get_query_id)"
-# initialize connection, but actually if there are other tables that uses this
-# cluster then, it will be created long time ago, but this is OK for this
-# test, since we care about the difference between NOW() and there should
-# not be any significant difference.
-$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
-    system flush logs;
-    select count(), countIf(initial_query_start_time_microseconds != query_start_time_microseconds), countIf(event_time - initial_query_start_time > 3) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
-"
-
-sleep 6
-
-query_id="$(get_query_id)"
-# this query (and all subsequent) should reuse the previous connection (at least most of the time)
-$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -q "select * from dist"
-
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
-    system flush logs;
-    select count(), countIf(initial_query_start_time_microseconds != query_start_time_microseconds), countIf(event_time - initial_query_start_time > 3) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
-"
-
-echo "INSERT"
-query_id="$(get_query_id)"
-$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 --query_id "$query_id" -nm -q "
-    insert into dist_dist values (1),(2);
-    select * from data;
-"
-
-sleep 3
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist_dist"
-sleep 1
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "system flush distributed dist"
-
-echo "CHECK"
-$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
-    select * from data order by key;
-    system flush logs;
-    select count(), countIf(initial_query_start_time_microseconds != query_start_time_microseconds), countIf(event_time - initial_query_start_time > 3) from system.query_log where type = 'QueryFinish' and initial_query_id = {query_id:String};
-"
diff --git a/tests/queries/0_stateless/02985_shard_query_start_time.reference b/tests/queries/0_stateless/02985_shard_query_start_time.reference
new file mode 100644
index 000000000000..ff563ea1d53a
--- /dev/null
+++ b/tests/queries/0_stateless/02985_shard_query_start_time.reference
@@ -0,0 +1,2 @@
+QueryStart 2 2 2 2
+QueryFinish 2 2 2 2
diff --git a/tests/queries/0_stateless/02985_shard_query_start_time.sql b/tests/queries/0_stateless/02985_shard_query_start_time.sql
new file mode 100644
index 000000000000..c31d81e58ae1
--- /dev/null
+++ b/tests/queries/0_stateless/02985_shard_query_start_time.sql
@@ -0,0 +1,34 @@
+DROP TABLE IF EXISTS sharded_table;
+CREATE TABLE sharded_table (dummy UInt8) ENGINE = Distributed('test_cluster_two_shards', 'system', 'one');
+
+SET prefer_localhost_replica=0;
+SELECT * FROM sharded_table FORMAT Null SETTINGS log_comment='02985_shard_query_start_time_query_1';
+
+SYSTEM FLUSH LOGS;
+
+-- Check that there are 2 queries to shards and for each one query_start_time_microseconds is more recent
+-- than initial_query_start_time_microseconds, and initial_query_start_time_microseconds matches the original query
+-- query_start_time_microseconds
+WITH
+(
+    SELECT
+        (query_id, query_start_time, query_start_time_microseconds)
+    FROM
+        system.query_log
+    WHERE
+        event_date >= yesterday()
+        AND current_database = currentDatabase()
+        AND log_comment = '02985_shard_query_start_time_query_1'
+        AND type = 'QueryFinish'
+) AS id_and_start_tuple
+SELECT
+    type,
+    countIf(query_start_time >= initial_query_start_time), -- Using >= because it's comparing seconds
+    countIf(query_start_time_microseconds > initial_query_start_time_microseconds),
+    countIf(initial_query_start_time = id_and_start_tuple.2),
+    countIf(initial_query_start_time_microseconds = id_and_start_tuple.3)
+FROM
+    system.query_log
+WHERE
+    NOT is_initial_query AND initial_query_id = id_and_start_tuple.1
+GROUP BY type;
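
A minimal sketch (not part of the patch) of how the behavior codified by the new test can be inspected by hand, assuming the same sharded_table and log_comment from 02985_shard_query_start_time.sql have already been run: shard-side entries in system.query_log should keep their own query_start_time_microseconds while inheriting initial_query_start_time_microseconds from the initiator.

-- Illustrative sketch only: list the initiator and shard-side query_log entries side by side.
-- Assumes 02985_shard_query_start_time.sql has been executed in the current database.
SYSTEM FLUSH LOGS;

SELECT
    is_initial_query,
    type,
    query_start_time_microseconds,
    initial_query_start_time_microseconds
FROM system.query_log
WHERE event_date >= yesterday()
    AND initial_query_id IN
    (
        SELECT query_id
        FROM system.query_log
        WHERE current_database = currentDatabase()
            AND log_comment = '02985_shard_query_start_time_query_1'
            AND type = 'QueryFinish'
    )
ORDER BY is_initial_query DESC, event_time_microseconds;

The shard entries are matched by initial_query_id rather than by current_database, since, as the deleted 02590 test noted, queries forwarded to remote nodes do not run in currentDatabase().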