Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Randomize more settings #39663

Merged
merged 20 commits into from
Dec 19, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
7 changes: 7 additions & 0 deletions tests/clickhouse-test
Expand Up @@ -611,6 +611,13 @@ class SettingsRandomizer:
"compile_sort_description": lambda: random.randint(0, 1),
"merge_tree_coarse_index_granularity": lambda: random.randint(2, 32),
"optimize_distinct_in_order": lambda: random.randint(0, 1),
"max_bytes_before_external_sort": threshold_generator(
1.0, 0.5, 1, 10 * 1024 * 1024 * 1024
),
"max_bytes_before_external_group_by": threshold_generator(
1.0, 0.5, 1, 10 * 1024 * 1024 * 1024
),
"max_bytes_before_remerge_sort": lambda: random.randint(1, 3000000000),
"optimize_sorting_by_input_stream_properties": lambda: random.randint(0, 1),
"http_response_buffer_size": lambda: random.randint(0, 10 * 1048576),
"http_wait_end_of_query": lambda: random.random() > 0.5,
Expand Down
3 changes: 3 additions & 0 deletions tests/queries/0_stateless/00109_shard_totals_after_having.sql
Expand Up @@ -4,6 +4,9 @@ SET max_rows_to_group_by = 100000;
SET max_block_size = 100001;
SET group_by_overflow_mode = 'any';

-- Settings 'max_rows_to_group_by' and 'max_bytes_before_external_group_by' are mutually exclusive.
SET max_bytes_before_external_group_by = 0;

DROP TABLE IF EXISTS numbers500k;
CREATE TABLE numbers500k (number UInt32) ENGINE = TinyLog;

Expand Down
2 changes: 1 addition & 1 deletion tests/queries/0_stateless/00119_storage_join.sql
Expand Up @@ -12,7 +12,7 @@ SELECT x, s, k FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LE
SELECT 1, x, 2, s, 3, k, 4 FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k;

SELECT t1.k, t1.s, t2.x
FROM ( SELECT number AS k, 'a' AS s FROM numbers(2) GROUP BY number WITH TOTALS ) AS t1
FROM ( SELECT number AS k, 'a' AS s FROM numbers(2) GROUP BY number WITH TOTALS ORDER BY number) AS t1
ANY LEFT JOIN t2 AS t2 USING(k);

DROP TABLE t2;
Expand Up @@ -16,7 +16,7 @@ if [ -n "$DBMS_TESTS_UNDER_VALGRIND" ]; then
fi

for i in $(seq 1000000 $((20000 * $STEP_MULTIPLIER)) 10000000 && seq 10100000 $((100000 * $STEP_MULTIPLIER)) 50000000); do
$CLICKHOUSE_CLIENT --max_memory_usage="$i" --query="
$CLICKHOUSE_CLIENT --max_memory_usage="$i" --max_bytes_before_external_group_by 0 --query="
SELECT intDiv(number, 5) AS k, max(toString(number)) FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}.numbers_100k) GROUP BY k ORDER BY k LIMIT 1;
" 2> /dev/null;
CODE=$?;
Expand Down
14 changes: 11 additions & 3 deletions tests/queries/0_stateless/00155_long_merges.sh
Expand Up @@ -34,32 +34,40 @@ function test {

SETTINGS="--min_insert_block_size_rows=0 --min_insert_block_size_bytes=0 --max_block_size=65505"

$CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES summing_00155"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $1"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $2"

$CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES collapsing_00155"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO collapsing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $1"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO collapsing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $2"

$CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES aggregating_00155"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO aggregating_00155 (d, x, s) SELECT today() AS d, number AS x, sumState(materialize(toUInt64(1))) AS s FROM (SELECT number FROM system.numbers LIMIT $1) GROUP BY number"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO aggregating_00155 (d, x, s) SELECT today() AS d, number AS x, sumState(materialize(toUInt64(1))) AS s FROM (SELECT number FROM system.numbers LIMIT $2) GROUP BY number"

$CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES replacing_00155"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO replacing_00155 (x, v) SELECT number AS x, toUInt64(number % 3 == 0) FROM system.numbers LIMIT $1"
$CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO replacing_00155 (x, v) SELECT number AS x, toUInt64(number % 3 == 1) FROM system.numbers LIMIT $2"

$CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sum(s) = $SUM FROM summing_00155"
$CLICKHOUSE_CLIENT --query="SYSTEM START MERGES summing_00155"
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE summing_00155"
$CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sum(s) = $SUM FROM summing_00155"
echo
$CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sum(s) = $SUM FROM collapsing_00155"
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE collapsing_00155" --server_logs_file='/dev/null';
$CLICKHOUSE_CLIENT --query="SYSTEM START MERGES collapsing_00155"
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE collapsing_00155 FINAL" --server_logs_file='/dev/null';
$CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sum(s) = $MAX FROM collapsing_00155"
echo
$CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sumMerge(s) = $SUM FROM aggregating_00155"
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE aggregating_00155"
$CLICKHOUSE_CLIENT --query="SYSTEM START MERGES aggregating_00155"
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE aggregating_00155 FINAL"
$CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sumMerge(s) = $SUM FROM aggregating_00155"
echo
$CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sum(s) = $SUM FROM replacing_00155"
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE replacing_00155"
$CLICKHOUSE_CLIENT --query="SYSTEM START MERGES replacing_00155"
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE replacing_00155 FINAL"
$CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sum(s) = $MAX FROM replacing_00155"
$CLICKHOUSE_CLIENT --query="SELECT count() = sum(v) FROM replacing_00155 where x % 3 == 0 and x < $1"
$CLICKHOUSE_CLIENT --query="SELECT count() = sum(v) FROM replacing_00155 where x % 3 == 1 and x < $2"
Expand Down
@@ -1 +1,4 @@
-- Disable external aggregation because the state is reset for each new block of data in the 'runningAccumulate' function.
SET max_bytes_before_external_group_by = 0;

SELECT k, finalizeAggregation(sum_state), runningAccumulate(sum_state) FROM (SELECT intDiv(number, 50000) AS k, sumState(number) AS sum_state FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k);
Expand Up @@ -36,9 +36,9 @@ GROUP BY ORDER BY
1
GROUP BY w/ ALIAS
0
1
0
1
1
ORDER BY w/ ALIAS
0
func(aggregate function) GROUP BY
Expand Down
Expand Up @@ -34,7 +34,7 @@ SELECT uniq(number) u FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184

-- cover possible tricky issues
SELECT 'GROUP BY w/ ALIAS';
SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) GROUP BY number AS n SETTINGS distributed_group_by_no_merge=2;
SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) GROUP BY number AS n ORDER BY n SETTINGS distributed_group_by_no_merge=2;

SELECT 'ORDER BY w/ ALIAS';
SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) ORDER BY number AS n LIMIT 1 SETTINGS distributed_group_by_no_merge=2;
Expand Down
3 changes: 3 additions & 0 deletions tests/queries/0_stateless/00273_quantiles.sql
Expand Up @@ -8,4 +8,7 @@ SELECT quantilesExact(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0
SELECT quantilesTDigest(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001);
SELECT quantilesDeterministic(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x, x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001);

-- The result differs slightly, but that's acceptable since `quantilesDeterministic` is an approximate function.
SET max_bytes_before_external_group_by = 0;

SELECT round(1000000 / (number + 1)) AS k, count() AS c, arrayMap(x -> round(x, 6), quantilesDeterministic(0.1, 0.5, 0.9)(number, intHash64(number))) AS q1, quantilesExact(0.1, 0.5, 0.9)(number) AS q2 FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k;
Expand Up @@ -7,4 +7,8 @@ DROP TABLE IF EXISTS arena;

SELECT length(arrayReduce('groupUniqArray', [[1, 2], [1], emptyArrayUInt8(), [1], [1, 2]]));
SELECT min(x), max(x) FROM (SELECT length(arrayReduce('groupUniqArray', [hex(number), hex(number+1), hex(number)])) AS x FROM system.numbers LIMIT 100000);

-- Disable external aggregation because the state is reset for each new block of data in the 'runningAccumulate' function.
SET max_bytes_before_external_group_by = 0;

SELECT sum(length(runningAccumulate(x))) FROM (SELECT groupUniqArrayState(toString(number % 10)) AS x, number FROM (SELECT * FROM system.numbers LIMIT 11) GROUP BY number ORDER BY number);
Expand Up @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1 --allow_deprecated_syntax_for_merge_tree=1"
settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1 --allow_deprecated_syntax_for_merge_tree=1 --max_bytes_before_external_group_by 0 --max_bytes_before_external_sort 0"

# Test insert logging on each block and checkPacket() method

Expand Down
6 changes: 4 additions & 2 deletions tests/queries/0_stateless/00808_not_optimize_predicate.sql
Expand Up @@ -48,7 +48,8 @@ SELECT
intDiv(number, 25) AS n,
avgState(number) AS s
FROM numbers(2500)
GROUP BY n;
GROUP BY n
ORDER BY n;

SET force_primary_key = 1, enable_optimize_predicate_expression = 1;

Expand All @@ -60,7 +61,8 @@ FROM
finalizeAggregation(s)
FROM test_00808_push_down_with_finalizeAggregation
)
WHERE (n >= 2) AND (n <= 5);
WHERE (n >= 2) AND (n <= 5)
ORDER BY n;

EXPLAIN SYNTAX SELECT *
FROM
Expand Down
Expand Up @@ -2,6 +2,7 @@
-- Tag no-msan: memory limits don't work correctly under msan because it replaces malloc/free

SET max_memory_usage = 1000000000;
SET max_bytes_before_external_group_by = 0;

SELECT sum(ignore(*)) FROM (
SELECT number, argMax(number, (number, toFixedString(toString(number), 1024)))
Expand Down
4 changes: 2 additions & 2 deletions tests/queries/0_stateless/00947_ml_test.sql

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions tests/queries/0_stateless/00953_moving_functions.sql
Expand Up @@ -24,6 +24,10 @@ INSERT INTO moving_sum_num

SELECT * FROM moving_sum_num ORDER BY k,dt FORMAT TabSeparatedWithNames;

-- The result of the 'groupArrayMovingSum' function depends on the order of merging
-- aggregate states, which is implementation-defined in external aggregation.
SET max_bytes_before_external_group_by = 0;

SELECT k, groupArrayMovingSum(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes;
SELECT k, groupArrayMovingSum(3)(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes;

Expand Down
3 changes: 3 additions & 0 deletions tests/queries/0_stateless/01012_reset_running_accumulate.sql
@@ -1,3 +1,6 @@
-- Disable external aggregation because the state is reset for each new block of data in the 'runningAccumulate' function.
SET max_bytes_before_external_group_by = 0;

SELECT grouping,
item,
runningAccumulate(state, grouping)
Expand Down
@@ -1,5 +1,8 @@
SET joined_subquery_requires_alias = 0;
SET max_threads = 1;
-- External sorting affects the number of read rows and therefore the 'max_rows_to_read' check.
SET max_bytes_before_external_sort = 0;
SET max_bytes_before_external_group_by = 0;

-- incremental streaming usecase
-- that has sense only if data filling order has guarantees of chronological order
Expand Down
@@ -1 +1 @@
select arraySlice(groupArray(x),1,1) as y from (select uniqState(number) as x from numbers(10) group by number);
select arraySlice(groupArray(x), 1, 1) as y from (select uniqState(number) as x from numbers(10) group by number order by number);
3 changes: 3 additions & 0 deletions tests/queries/0_stateless/01134_max_rows_to_group_by.sql
Expand Up @@ -2,6 +2,9 @@ SET max_block_size = 1;
SET max_rows_to_group_by = 10;
SET group_by_overflow_mode = 'throw';

-- Settings 'max_rows_to_group_by' and 'max_bytes_before_external_group_by' are mutually exclusive.
SET max_bytes_before_external_group_by = 0;

SELECT 'test1', number FROM system.numbers GROUP BY number; -- { serverError 158 }

SET group_by_overflow_mode = 'break';
Expand Down
2 changes: 1 addition & 1 deletion tests/queries/0_stateless/01193_metadata_loading.sh
Expand Up @@ -29,7 +29,7 @@ create_tables() {
groupArray(
create1 || toString(number) || create2 || engines[1 + number % length(engines)] || ';\n' ||
insert1 || toString(number) || insert2
), ';\n') FROM numbers($tables) FORMAT TSVRaw;" | $CLICKHOUSE_CLIENT -nm
), ';\n') FROM numbers($tables) SETTINGS max_bytes_before_external_group_by = 0 FORMAT TSVRaw;" | $CLICKHOUSE_CLIENT -nm
}

$CLICKHOUSE_CLIENT -q "CREATE DATABASE $db"
Expand Down
Expand Up @@ -4,6 +4,9 @@

set optimize_distributed_group_by_sharding_key=1;

-- Some queries in this test require sorting after aggregation.
set max_bytes_before_external_group_by = 0;

drop table if exists dist_01247;
drop table if exists data_01247;

Expand Down
Expand Up @@ -6,7 +6,12 @@ DROP TABLE IF EXISTS dist;
create table data (key String) Engine=Memory();
create table dist (key LowCardinality(String)) engine=Distributed(test_cluster_two_shards, currentDatabase(), data);
insert into data values ('foo');

set distributed_aggregation_memory_efficient=1;

-- There is an obscure bug in a rare corner case.
set max_bytes_before_external_group_by = 0;

select * from dist group by key;

DROP TABLE data;
Expand Down
3 changes: 3 additions & 0 deletions tests/queries/0_stateless/01472_many_rows_in_totals.sql
@@ -1,4 +1,7 @@
-- Disable external aggregation because it may produce several blocks instead of one.
set max_bytes_before_external_group_by = 0;
set output_format_write_statistics = 0;

select g, s from (select g, sum(number) as s from numbers(4) group by bitAnd(number, 1) as g with totals order by g) array join [1, 2] as a format Pretty;
select '--';

Expand Down
Expand Up @@ -12,6 +12,7 @@ optimize table data_01513 final;
set max_memory_usage='500M';
set max_threads=1;
set max_block_size=500;
set max_bytes_before_external_group_by=0;

select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=0; -- { serverError 241 }
select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=1;
Expand Down
Expand Up @@ -15,6 +15,7 @@ opts=(
"--max_block_size=50"
"--max_threads=1"
"--max_distributed_connections=2"
"--max_bytes_before_external_group_by=0"
)
${CLICKHOUSE_CLIENT} "${opts[@]}" -q "SELECT groupArray(repeat('a', if(_shard_num == 2, 100000, 1))), number%100000 k from remote('127.{2,3}', system.numbers) GROUP BY k LIMIT 10e6" |& {
# the query should fail earlier on 127.3 and 127.2 should not even go to the memory limit exceeded error.
Expand Down
Expand Up @@ -28,7 +28,7 @@ opts=(
"--prefer_localhost_replica=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --format CSV -nm <<EOL
select count(), * from dist_01247 group by number limit 1;
select count(), * from dist_01247 group by number order by number limit 1;
EOL

# expect zero new network errors
Expand Down
4 changes: 4 additions & 0 deletions tests/queries/0_stateless/01591_window_functions.sql
Expand Up @@ -2,6 +2,10 @@

SET allow_experimental_analyzer = 1;

-- External sorting and aggregation make this test too slow; disable them.
SET max_bytes_before_external_sort = 0;
SET max_bytes_before_external_group_by = 0;

-- { echo }

-- just something basic
Expand Down
2 changes: 2 additions & 0 deletions tests/queries/0_stateless/01710_aggregate_projections.sh
Expand Up @@ -4,6 +4,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

# Number of read rows depends on max_bytes_before_external_group_by.
CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0"

$CLICKHOUSE_CLIENT -q "CREATE TABLE test_agg_proj (x Int32, y Int32, PROJECTION x_plus_y (SELECT sum(x - y), argMax(x, y) group by x + y)) ENGINE = MergeTree ORDER BY tuple() settings index_granularity = 1"
$CLICKHOUSE_CLIENT -q "insert into test_agg_proj select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100)"
Expand Down
3 changes: 3 additions & 0 deletions tests/queries/0_stateless/01799_long_uniq_theta_sketch.sql
@@ -1,5 +1,8 @@
-- Tags: long, no-fasttest

-- The result slightly differs but it's ok since `uniqueTheta` is an approximate function.
set max_bytes_before_external_group_by = 0;

SELECT 'uniqTheta';

SELECT Y, uniqTheta(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y;
Expand Down
Expand Up @@ -45,7 +45,7 @@

"data":
[
[12]
[10]
],

"rows": 1,
Expand Down
10 changes: 5 additions & 5 deletions tests/queries/0_stateless/01913_exact_rows_before_limit_full.sql
Expand Up @@ -10,20 +10,20 @@ set exact_rows_before_limit = 1, output_format_write_statistics = 0, max_block_s

select * from test limit 1 FORMAT JSONCompact;

select * from test where i < 10 group by i limit 1 FORMAT JSONCompact;
select * from test where i < 10 group by i order by i limit 1 FORMAT JSONCompact;

select * from test group by i having i in (10, 11, 12) limit 1 FORMAT JSONCompact;
select * from test group by i having i in (10, 11, 12) order by i limit 1 FORMAT JSONCompact;

select * from test where i < 20 order by i limit 1 FORMAT JSONCompact;

set prefer_localhost_replica = 0;
select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 limit 1 FORMAT JSONCompact;
select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 order by i limit 1 FORMAT JSONCompact;
select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 order by i limit 1 FORMAT JSONCompact;

set prefer_localhost_replica = 1;
select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 limit 1 FORMAT JSONCompact;
select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 order by i limit 1 FORMAT JSONCompact;
select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 order by i limit 1 FORMAT JSONCompact;

select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) limit 1 FORMAT JSONCompact;
select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) order by i limit 1 FORMAT JSONCompact;

drop table if exists test;
3 changes: 1 addition & 2 deletions tests/queries/0_stateless/02096_totals_global_in_bug.sql
@@ -1,2 +1 @@
select sum(number) from remote('127.0.0.{2,3}', numbers(2)) where number global in (select sum(number) from numbers(2) group by number with totals) group by number with totals

select sum(number) from remote('127.0.0.{2,3}', numbers(2)) where number global in (select sum(number) from numbers(2) group by number with totals) group by number with totals order by number;
12 changes: 6 additions & 6 deletions tests/queries/0_stateless/02163_shard_num.reference
@@ -1,18 +1,18 @@
-- { echoOn }

SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num;
2 1
SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num;
1 1
SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num;
2 1
SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num;
1 1
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num;
2 1
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num;
1 1
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num;
2 1
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num;
1 1
SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num;
2 1
SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num ORDER BY shard_num;
1 1
2 1
SELECT _shard_num FROM remote('127.1', system.one) AS a INNER JOIN (SELECT _shard_num FROM system.one) AS b USING (dummy); -- { serverError UNSUPPORTED_METHOD, UNKNOWN_IDENTIFIER }
10 changes: 5 additions & 5 deletions tests/queries/0_stateless/02163_shard_num.sql
@@ -1,10 +1,10 @@
-- { echoOn }

SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num;
SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num;
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num;
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num;
SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num;
SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num;
SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num;
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num;
SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num;
SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num ORDER BY shard_num;
SELECT _shard_num FROM remote('127.1', system.one) AS a INNER JOIN (SELECT _shard_num FROM system.one) AS b USING (dummy); -- { serverError UNSUPPORTED_METHOD, UNKNOWN_IDENTIFIER }

-- { echoOff }