From b2e2b4cce7f913957317b5df3eddf930ac8407f8 Mon Sep 17 00:00:00 2001 From: liyang830 Date: Mon, 18 Apr 2022 19:58:57 +0800 Subject: [PATCH 001/179] fix attach table dictionaries function name normalizer --- src/Databases/DatabaseOrdinary.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index b5557d9a08d9..baf93182a571 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -106,6 +107,7 @@ void DatabaseOrdinary::loadStoredObjects( const auto & name = name_with_path_and_query.first; const auto & path = name_with_path_and_query.second.path; const auto & ast = name_with_path_and_query.second.ast; + FunctionNameNormalizer().visit(ast.get()); const auto & create_query = ast->as(); if (create_query.is_dictionary) @@ -128,6 +130,7 @@ void DatabaseOrdinary::loadStoredObjects( const auto & name = name_with_path_and_query.first; const auto & path = name_with_path_and_query.second.path; const auto & ast = name_with_path_and_query.second.ast; + FunctionNameNormalizer().visit(ast.get()); const auto & create_query = ast->as(); if (!create_query.is_dictionary) @@ -167,6 +170,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables auto ast = parseQueryFromMetadata(log, getContext(), full_path.string(), /*throw_on_error*/ true, /*remove_empty*/ false); if (ast) { + FunctionNameNormalizer().visit(ast.get()); auto * create_query = ast->as(); create_query->setDatabase(database_name); @@ -220,6 +224,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables void DatabaseOrdinary::loadTableFromMetadata(ContextMutablePtr local_context, const String & file_path, const QualifiedTableName & name, const ASTPtr & ast, bool force_restore) { assert(name.database == database_name); + FunctionNameNormalizer().visit(ast.get()); const auto & create_query = ast->as(); tryAttachTable( From f091c8d1d8ff0577e60bf1aed0d3f97d30cdb35f Mon Sep 17 00:00:00 2001 From: liyang830 Date: Fri, 17 Jun 2022 16:42:05 +0800 Subject: [PATCH 002/179] fix: attach table normalizer, add test --- src/Databases/DatabaseOrdinary.cpp | 3 -- src/Interpreters/InterpreterCreateQuery.cpp | 1 + .../test_attach_table_normalizer/__init__.py | 0 .../configs/config.xml | 4 ++ .../test_attach_table_normalizer/test.py | 43 +++++++++++++++++++ 5 files changed, 48 insertions(+), 3 deletions(-) create mode 100644 tests/integration/test_attach_table_normalizer/__init__.py create mode 100644 tests/integration/test_attach_table_normalizer/configs/config.xml create mode 100644 tests/integration/test_attach_table_normalizer/test.py diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index baf93182a571..5708ff50323b 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -107,7 +107,6 @@ void DatabaseOrdinary::loadStoredObjects( const auto & name = name_with_path_and_query.first; const auto & path = name_with_path_and_query.second.path; const auto & ast = name_with_path_and_query.second.ast; - FunctionNameNormalizer().visit(ast.get()); const auto & create_query = ast->as(); if (create_query.is_dictionary) @@ -170,7 +169,6 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables auto ast = parseQueryFromMetadata(log, getContext(), full_path.string(), /*throw_on_error*/ true, /*remove_empty*/ false); if (ast) { - 
FunctionNameNormalizer().visit(ast.get()); auto * create_query = ast->as(); create_query->setDatabase(database_name); @@ -224,7 +222,6 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables void DatabaseOrdinary::loadTableFromMetadata(ContextMutablePtr local_context, const String & file_path, const QualifiedTableName & name, const ASTPtr & ast, bool force_restore) { assert(name.database == database_name); - FunctionNameNormalizer().visit(ast.get()); const auto & create_query = ast->as(); tryAttachTable( diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ed996430996d..7eb293b1813d 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -953,6 +953,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) // Table SQL definition is available even if the table is detached (even permanently) auto query = database->getCreateTableQuery(create.getTable(), getContext()); + FunctionNameNormalizer().visit(query.get()); auto create_query = query->as(); if (!create.is_dictionary && create_query.is_dictionary) diff --git a/tests/integration/test_attach_table_normalizer/__init__.py b/tests/integration/test_attach_table_normalizer/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/integration/test_attach_table_normalizer/configs/config.xml b/tests/integration/test_attach_table_normalizer/configs/config.xml new file mode 100644 index 000000000000..0500e2ad5542 --- /dev/null +++ b/tests/integration/test_attach_table_normalizer/configs/config.xml @@ -0,0 +1,4 @@ + + 1 + 1 + diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py new file mode 100644 index 000000000000..3e86d567c5b8 --- /dev/null +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -0,0 +1,43 @@ +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance('node', main_configs=["configs/config.xml"], with_zookeeper=True) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + +def replace_substring_to_substr(node): + node.exec_in_container(["bash", "-c", "sed -i 's/substring/substr/g' /var/lib/clickhouse/metadata/default/file.sql"], user="root") + +@pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) +def test_attach_substr(started_cluster, engine): + # Initialize + node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") + + # Detach table file + node.query("DETACH TABLE file") + + # Replace subtring to substr + replace_substring_to_substr(node) + + # Attach table file + node.query("ATTACH TABLE file") + +@pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) +def test_attach_substr(started_cluster, engine): + # Initialize + node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") + + # Replace subtring to substr + replace_substring_to_substr(node) + + # Restart clickhouse + node.restart_clickhouse(kill=True) From c7a85d565cb17c068528bdbf38a74d0ab29a1450 Mon Sep 17 00:00:00 2001 From: liyang830 Date: Fri, 17 Jun 2022 17:51:33 +0800 Subject: [PATCH 003/179] fix: rename restart test --- tests/integration/test_attach_table_normalizer/test.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index 3e86d567c5b8..5a31801b99ca 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -32,7 +32,7 @@ def test_attach_substr(started_cluster, engine): node.query("ATTACH TABLE file") @pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) -def test_attach_substr(started_cluster, engine): +def test_attach_substr_restart(started_cluster, engine): # Initialize node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") From 701c687e7933f77ad51e91fa8bf1ef6ff2282f8d Mon Sep 17 00:00:00 2001 From: liyang830 Date: Sat, 18 Jun 2022 17:13:50 +0800 Subject: [PATCH 004/179] fix : test error --- src/Databases/DatabaseOrdinary.cpp | 2 +- tests/integration/test_attach_table_normalizer/test.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 5708ff50323b..1477014a869c 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -129,7 +129,6 @@ void DatabaseOrdinary::loadStoredObjects( const auto & name = name_with_path_and_query.first; const auto & path = name_with_path_and_query.second.path; const auto & ast = name_with_path_and_query.second.ast; - FunctionNameNormalizer().visit(ast.get()); const auto & create_query = ast->as(); if (!create_query.is_dictionary) @@ -169,6 +168,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables auto ast = parseQueryFromMetadata(log, getContext(), full_path.string(), /*throw_on_error*/ true, /*remove_empty*/ false); if (ast) { + FunctionNameNormalizer().visit(ast.get()); auto * create_query = ast->as(); create_query->setDatabase(database_name); diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index 5a31801b99ca..80c4b99dfcc8 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -20,12 +20,13 @@ def replace_substring_to_substr(node): @pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) def test_attach_substr(started_cluster, engine): # Initialize + node.query("DROP TABLE IF EXISTS default.file") node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") # Detach table file node.query("DETACH TABLE file") - # Replace subtring to substr + # Replace substring to substr replace_substring_to_substr(node) # Attach table file @@ -34,9 +35,10 @@ def test_attach_substr(started_cluster, engine): @pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) def test_attach_substr_restart(started_cluster, engine): # Initialize + node.query("DROP TABLE IF EXISTS default.file") node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") - # Replace subtring to substr + # Replace substring to substr replace_substring_to_substr(node) # Restart clickhouse From 252e750fd79090dc4fdb8bfb1317d8f8b1f3136c Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Jul 2022 17:57:24 +0200 Subject: [PATCH 005/179] Update test.py --- tests/integration/test_attach_table_normalizer/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index 80c4b99dfcc8..f2d99588b941 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/config.xml"], with_zookeeper=True) +node = cluster.add_instance('node', main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True) @pytest.fixture(scope="module") From 935bc723299056c816646fb9067638a60ddfb085 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 14 Feb 2023 18:51:10 +0100 Subject: [PATCH 006/179] [TEST] Tune allocator Processing of the default max_block_size can be faster then running mmap()/munmap() plus memory dependencies. Here is an example: SELECT count() FROM zeros(10_000_000) WHERE NOT ignore(randomString(1000)) SETTINGS function_implementation='avx2' - Before this patch it takes: ~6sec - After: 1.3sec And even though 128MiB should be enough, since for this query size of allocation for string will be 65409*(1000+1)=65474409 bytes, due to rounding to power of two it will not, so let's try simply use 256MiB (another option is to use strict comparison for MMAP_THRESHOLD) and see the perf tests. But also note, that this has other allocator side effects (performance, fragmentation), so unlikely this is for upstream. I've found this while I was playing with PODArray [1]. [1]: https://s3.amazonaws.com/clickhouse-test-reports/45654/2101b66570cbb9eb9a492afa8ab82d562c34336b/performance_comparison_[1/4]/report.html Signed-off-by: Azat Khuzhin --- src/Common/Allocator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/Allocator.cpp b/src/Common/Allocator.cpp index 5a66ddb63a2b..c02210f2ecee 100644 --- a/src/Common/Allocator.cpp +++ b/src/Common/Allocator.cpp @@ -8,7 +8,7 @@ * See also: https://gcc.gnu.org/legacy-ml/gcc-help/2017-12/msg00021.html */ #ifdef NDEBUG - __attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 64 * (1ULL << 20); + __attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 256 * (1ULL << 20); #else /** * In debug build, use small mmap threshold to reproduce more memory From 5781eb67cba3e827ecf47b7929c47777a6e48094 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 26 Feb 2023 01:28:13 +0300 Subject: [PATCH 007/179] Update test.py --- tests/integration/test_attach_table_normalizer/test.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index f2d99588b941..526da39935ac 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -17,8 +17,7 @@ def started_cluster(): def replace_substring_to_substr(node): node.exec_in_container(["bash", "-c", "sed -i 's/substring/substr/g' /var/lib/clickhouse/metadata/default/file.sql"], user="root") -@pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) -def test_attach_substr(started_cluster, engine): +def test_attach_substr(started_cluster): # Initialize node.query("DROP TABLE IF EXISTS default.file") node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") @@ -32,8 +31,7 @@ def test_attach_substr(started_cluster, engine): # Attach table file 
node.query("ATTACH TABLE file") -@pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) -def test_attach_substr_restart(started_cluster, engine): +def test_attach_substr_restart(started_cluster): # Initialize node.query("DROP TABLE IF EXISTS default.file") node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") From 63982a20936bb384a4c4f88f9e4ed2282680e33b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 26 Feb 2023 01:29:29 +0300 Subject: [PATCH 008/179] Delete config.xml --- .../test_attach_table_normalizer/configs/config.xml | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 tests/integration/test_attach_table_normalizer/configs/config.xml diff --git a/tests/integration/test_attach_table_normalizer/configs/config.xml b/tests/integration/test_attach_table_normalizer/configs/config.xml deleted file mode 100644 index 0500e2ad5542..000000000000 --- a/tests/integration/test_attach_table_normalizer/configs/config.xml +++ /dev/null @@ -1,4 +0,0 @@ - - 1 - 1 - From 0e01991eb7b1331d2fca09c94b3e41fdd5c32bb3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 26 Feb 2023 04:33:56 +0300 Subject: [PATCH 009/179] Update test.py --- .../test_attach_table_normalizer/test.py | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index 526da39935ac..ddbb02bf4eff 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -3,7 +3,9 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True) +node = cluster.add_instance( + 'node', main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True +) @pytest.fixture(scope="module") @@ -14,13 +16,24 @@ def started_cluster(): finally: cluster.shutdown() + def replace_substring_to_substr(node): - node.exec_in_container(["bash", "-c", "sed -i 's/substring/substr/g' /var/lib/clickhouse/metadata/default/file.sql"], user="root") + node.exec_in_container(( + [ + "bash", + "-c", + "sed -i 's/substring/substr/g' /var/lib/clickhouse/metadata/default/file.sql", + ], + user="root", + ) + def test_attach_substr(started_cluster): # Initialize node.query("DROP TABLE IF EXISTS default.file") - node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") + node.query( + "CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n " + ) # Detach table file node.query("DETACH TABLE file") @@ -31,10 +44,13 @@ def test_attach_substr(started_cluster): # Attach table file node.query("ATTACH TABLE file") + def test_attach_substr_restart(started_cluster): # Initialize node.query("DROP TABLE IF EXISTS default.file") - node.query("CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n ") + node.query( + "CREATE TABLE default.file(`s` String, `n` UInt8) ENGINE = MergeTree PARTITION BY substring(s, 1, 2) ORDER BY n " + ) # Replace substring to substr replace_substring_to_substr(node) From e997b1393ce12ba639049147afdedb13e338af38 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 19 Apr 2023 22:40:13 +0200 Subject: [PATCH 010/179] Play with MMAP_THRESHOLD (set it 
to 128MiB) Signed-off-by: Azat Khuzhin --- src/Common/Allocator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/Allocator.cpp b/src/Common/Allocator.cpp index c02210f2ecee..0fb90e5a47eb 100644 --- a/src/Common/Allocator.cpp +++ b/src/Common/Allocator.cpp @@ -8,7 +8,7 @@ * See also: https://gcc.gnu.org/legacy-ml/gcc-help/2017-12/msg00021.html */ #ifdef NDEBUG - __attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 256 * (1ULL << 20); + __attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 128 * (1ULL << 20); #else /** * In debug build, use small mmap threshold to reproduce more memory From 491c26fb0aa08dd75adf46699225658fd9a45d5d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 29 Apr 2023 18:55:19 +0200 Subject: [PATCH 011/179] Slight improvement in Disks interface --- src/Disks/DiskEncrypted.cpp | 2 +- src/Disks/DiskEncrypted.h | 6 +-- src/Disks/DiskLocal.cpp | 44 +++++++++++++------ src/Disks/DiskLocal.h | 8 ++-- src/Disks/IDisk.h | 8 ++-- src/Disks/IVolume.cpp | 4 +- src/Disks/IVolume.h | 2 +- .../ObjectStorages/DiskObjectStorage.cpp | 17 ++++--- src/Disks/ObjectStorages/DiskObjectStorage.h | 12 +++-- src/Disks/StoragePolicy.cpp | 27 ++++++++++-- src/Disks/VolumeJBOD.cpp | 22 +++++++--- src/Disks/VolumeJBOD.h | 4 +- src/Functions/filesystem.cpp | 6 +-- .../ServerAsynchronousMetrics.cpp | 23 ++++++---- .../MergeTree/MergeTreePartsMover.cpp | 14 +++--- src/Storages/System/StorageSystemDisks.cpp | 6 +-- 16 files changed, 131 insertions(+), 74 deletions(-) diff --git a/src/Disks/DiskEncrypted.cpp b/src/Disks/DiskEncrypted.cpp index db18e9652e7d..1f8d75dbeb84 100644 --- a/src/Disks/DiskEncrypted.cpp +++ b/src/Disks/DiskEncrypted.cpp @@ -184,7 +184,7 @@ class DiskEncryptedReservation : public IReservation } UInt64 getSize() const override { return reservation->getSize(); } - UInt64 getUnreservedSpace() const override { return reservation->getUnreservedSpace(); } + std::optional getUnreservedSpace() const override { return reservation->getUnreservedSpace(); } DiskPtr getDisk(size_t i) const override { diff --git a/src/Disks/DiskEncrypted.h b/src/Disks/DiskEncrypted.h index 8e824a1f7e52..5d04558792e5 100644 --- a/src/Disks/DiskEncrypted.h +++ b/src/Disks/DiskEncrypted.h @@ -256,17 +256,17 @@ class DiskEncrypted : public IDisk return std::make_shared(*this); } - UInt64 getTotalSpace() const override + std::optional getTotalSpace() const override { return delegate->getTotalSpace(); } - UInt64 getAvailableSpace() const override + std::optional getAvailableSpace() const override { return delegate->getAvailableSpace(); } - UInt64 getUnreservedSpace() const override + std::optional getUnreservedSpace() const override { return delegate->getUnreservedSpace(); } diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index 49f28a19b312..af9d4ffd19c4 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -97,7 +97,8 @@ static void loadDiskLocalConfig(const String & name, tmp_path = context->getPath(); // Create tmp disk for getting total disk space. - keep_free_space_bytes = static_cast(DiskLocal("tmp", tmp_path, 0).getTotalSpace() * ratio); + auto total_space_of_local_disk = DiskLocal("tmp", tmp_path, 0).getTotalSpace(); + keep_free_space_bytes = total_space_of_local_disk ? 
static_cast(*total_space_of_local_disk * ratio) : 0; } } @@ -128,7 +129,7 @@ class DiskLocalReservation : public IReservation {} UInt64 getSize() const override { return size; } - UInt64 getUnreservedSpace() const override { return unreserved_space; } + std::optional getUnreservedSpace() const override { return unreserved_space; } DiskPtr getDisk(size_t i) const override { @@ -225,8 +226,11 @@ std::optional DiskLocal::tryReserve(UInt64 bytes) { std::lock_guard lock(DiskLocal::reservation_mutex); - UInt64 available_space = getAvailableSpace(); - UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes); + auto available_space = getAvailableSpace(); + + UInt64 unreserved_space = available_space + ? *available_space - std::min(*available_space, reserved_bytes) + : std::numeric_limits::max(); if (bytes == 0) { @@ -237,12 +241,24 @@ std::optional DiskLocal::tryReserve(UInt64 bytes) if (unreserved_space >= bytes) { - LOG_TRACE( - logger, - "Reserved {} on local disk {}, having unreserved {}.", - ReadableSize(bytes), - backQuote(name), - ReadableSize(unreserved_space)); + if (available_space) + { + LOG_TRACE( + logger, + "Reserved {} on local disk {}, having unreserved {}.", + ReadableSize(bytes), + backQuote(name), + ReadableSize(unreserved_space)); + } + else + { + LOG_TRACE( + logger, + "Reserved {} on local disk {}.", + ReadableSize(bytes), + backQuote(name)); + } + ++reservation_count; reserved_bytes += bytes; return {unreserved_space - bytes}; @@ -268,14 +284,14 @@ static UInt64 getTotalSpaceByName(const String & name, const String & disk_path, return total_size - keep_free_space_bytes; } -UInt64 DiskLocal::getTotalSpace() const +std::optional DiskLocal::getTotalSpace() const { if (broken || readonly) return 0; return getTotalSpaceByName(name, disk_path, keep_free_space_bytes); } -UInt64 DiskLocal::getAvailableSpace() const +std::optional DiskLocal::getAvailableSpace() const { if (broken || readonly) return 0; @@ -292,10 +308,10 @@ UInt64 DiskLocal::getAvailableSpace() const return total_size - keep_free_space_bytes; } -UInt64 DiskLocal::getUnreservedSpace() const +std::optional DiskLocal::getUnreservedSpace() const { std::lock_guard lock(DiskLocal::reservation_mutex); - auto available_space = getAvailableSpace(); + auto available_space = *getAvailableSpace(); available_space -= std::min(available_space, reserved_bytes); return available_space; } diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 7ea2c04704c5..6da623327261 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -31,11 +31,9 @@ class DiskLocal : public IDisk ReservationPtr reserve(UInt64 bytes) override; - UInt64 getTotalSpace() const override; - - UInt64 getAvailableSpace() const override; - - UInt64 getUnreservedSpace() const override; + std::optional getTotalSpace() const override; + std::optional getAvailableSpace() const override; + std::optional getUnreservedSpace() const override; UInt64 getKeepingFreeSpace() const override { return keep_free_space_bytes; } diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 68798047cfd5..7202d1f5cfc5 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -127,13 +127,13 @@ class IDisk : public Space const String & getName() const override { return name; } /// Total available space on the disk. - virtual UInt64 getTotalSpace() const = 0; + virtual std::optional getTotalSpace() const = 0; /// Space currently available on the disk. 
- virtual UInt64 getAvailableSpace() const = 0; + virtual std::optional getAvailableSpace() const = 0; /// Space available for reservation (available space minus reserved space). - virtual UInt64 getUnreservedSpace() const = 0; + virtual std::optional getUnreservedSpace() const = 0; /// Amount of bytes which should be kept free on the disk. virtual UInt64 getKeepingFreeSpace() const { return 0; } @@ -463,7 +463,7 @@ class IReservation : boost::noncopyable /// Space available for reservation /// (with this reservation already take into account). - virtual UInt64 getUnreservedSpace() const = 0; + virtual std::optional getUnreservedSpace() const = 0; /// Get i-th disk where reservation take place. virtual DiskPtr getDisk(size_t i = 0) const = 0; /// NOLINT diff --git a/src/Disks/IVolume.cpp b/src/Disks/IVolume.cpp index eb474f12ad2c..15b52acb4227 100644 --- a/src/Disks/IVolume.cpp +++ b/src/Disks/IVolume.cpp @@ -49,9 +49,9 @@ IVolume::IVolume( throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Volume must contain at least one disk"); } -UInt64 IVolume::getMaxUnreservedFreeSpace() const +std::optional IVolume::getMaxUnreservedFreeSpace() const { - UInt64 res = 0; + std::optional res = 0; for (const auto & disk : disks) res = std::max(res, disk->getUnreservedSpace()); return res; diff --git a/src/Disks/IVolume.h b/src/Disks/IVolume.h index ada28caa960d..f40d4dcba60f 100644 --- a/src/Disks/IVolume.h +++ b/src/Disks/IVolume.h @@ -74,7 +74,7 @@ class IVolume : public Space virtual VolumeType getType() const = 0; /// Return biggest unreserved space across all disks - UInt64 getMaxUnreservedFreeSpace() const; + std::optional getMaxUnreservedFreeSpace() const; DiskPtr getDisk() const { return getDisk(0); } virtual DiskPtr getDisk(size_t i) const { return disks[i]; } diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index bf5d0ab829d7..2f4e0db070f9 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -469,18 +469,25 @@ void DiskObjectStorage::removeSharedRecursive( transaction->commit(); } -std::optional DiskObjectStorage::tryReserve(UInt64 bytes) +bool DiskObjectStorage::tryReserve(UInt64 bytes) { std::lock_guard lock(reservation_mutex); auto available_space = getAvailableSpace(); - UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes); + if (!available_space) + { + ++reservation_count; + reserved_bytes += bytes; + return true; + } + + UInt64 unreserved_space = *available_space - std::min(*available_space, reserved_bytes); if (bytes == 0) { LOG_TRACE(log, "Reserved 0 bytes on remote disk {}", backQuote(name)); ++reservation_count; - return {unreserved_space}; + return true; } if (unreserved_space >= bytes) @@ -493,14 +500,14 @@ std::optional DiskObjectStorage::tryReserve(UInt64 bytes) ReadableSize(unreserved_space)); ++reservation_count; reserved_bytes += bytes; - return {unreserved_space - bytes}; + return true; } else { LOG_TRACE(log, "Could not reserve {} on remote disk {}. 
Not enough unreserved space", ReadableSize(bytes), backQuote(name)); } - return {}; + return false; } bool DiskObjectStorage::supportsCache() const diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.h b/src/Disks/ObjectStorages/DiskObjectStorage.h index 4372bc759508..2c544e01ca9c 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.h +++ b/src/Disks/ObjectStorages/DiskObjectStorage.h @@ -53,11 +53,9 @@ friend class DiskObjectStorageRemoteMetadataRestoreHelper; const std::string & getCacheName() const override { return object_storage->getCacheName(); } - UInt64 getTotalSpace() const override { return std::numeric_limits::max(); } - - UInt64 getAvailableSpace() const override { return std::numeric_limits::max(); } - - UInt64 getUnreservedSpace() const override { return std::numeric_limits::max(); } + std::optional getTotalSpace() const override { return {}; } + std::optional getAvailableSpace() const override { return {}; } + std::optional getUnreservedSpace() const override { return {}; } UInt64 getKeepingFreeSpace() const override { return 0; } @@ -223,7 +221,7 @@ friend class DiskObjectStorageRemoteMetadataRestoreHelper; UInt64 reservation_count = 0; std::mutex reservation_mutex; - std::optional tryReserve(UInt64 bytes); + bool tryReserve(UInt64 bytes); const bool send_metadata; size_t threadpool_size; @@ -244,7 +242,7 @@ class DiskObjectStorageReservation final : public IReservation UInt64 getSize() const override { return size; } - UInt64 getUnreservedSpace() const override { return unreserved_space; } + std::optional getUnreservedSpace() const override { return unreserved_space; } DiskPtr getDisk(size_t i) const override; diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index f4be8b8fe86c..92cca23ca76b 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -211,7 +211,11 @@ UInt64 StoragePolicy::getMaxUnreservedFreeSpace() const { UInt64 res = 0; for (const auto & volume : volumes) - res = std::max(res, volume->getMaxUnreservedFreeSpace()); + { + auto max_unreserved_for_volume = volume->getMaxUnreservedFreeSpace(); + if (max_unreserved_for_volume) + res = std::max(res, *max_unreserved_for_volume); + } return res; } @@ -248,22 +252,37 @@ ReservationPtr StoragePolicy::reserveAndCheck(UInt64 bytes) const ReservationPtr StoragePolicy::makeEmptyReservationOnLargestDisk() const { UInt64 max_space = 0; + bool found_bottomless_disk = false; DiskPtr max_disk; + for (const auto & volume : volumes) { for (const auto & disk : volume->getDisks()) { - auto avail_space = disk->getAvailableSpace(); - if (avail_space > max_space) + auto available_space = disk->getAvailableSpace(); + + if (!available_space) { - max_space = avail_space; + max_disk = disk; + found_bottomless_disk = true; + break; + } + + if (*available_space > max_space) + { + max_space = *available_space; max_disk = disk; } } + + if (found_bottomless_disk) + break; } + if (!max_disk) throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "There is no space on any disk in storage policy: {}. 
" "It's likely all disks are broken", name); + auto reservation = max_disk->reserve(0); if (!reservation) { diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp index 64bd2619665f..885b1d56b0db 100644 --- a/src/Disks/VolumeJBOD.cpp +++ b/src/Disks/VolumeJBOD.cpp @@ -40,20 +40,28 @@ VolumeJBOD::VolumeJBOD( auto ratio = config.getDouble(config_prefix + ".max_data_part_size_ratio"); if (ratio < 0) throw Exception(ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "'max_data_part_size_ratio' have to be not less then 0."); + UInt64 sum_size = 0; std::vector sizes; for (const auto & disk : disks) { - sizes.push_back(disk->getTotalSpace()); - sum_size += sizes.back(); + auto size = disk->getTotalSpace(); + sizes.push_back(*size); + if (size) + sum_size += *size; + else + break; } - max_data_part_size = static_cast(sum_size * ratio / disks.size()); - for (size_t i = 0; i < disks.size(); ++i) + if (sizes.size() == disks.size()) { - if (sizes[i] < max_data_part_size) + max_data_part_size = static_cast(sum_size * ratio / disks.size()); + for (size_t i = 0; i < disks.size(); ++i) { - LOG_WARNING(logger, "Disk {} on volume {} have not enough space ({}) for containing part the size of max_data_part_size ({})", - backQuote(disks[i]->getName()), backQuote(config_prefix), ReadableSize(sizes[i]), ReadableSize(max_data_part_size)); + if (sizes[i] < max_data_part_size) + { + LOG_WARNING(logger, "Disk {} on volume {} have not enough space ({}) for containing part the size of max_data_part_size ({})", + backQuote(disks[i]->getName()), backQuote(config_prefix), ReadableSize(sizes[i]), ReadableSize(max_data_part_size)); + } } } } diff --git a/src/Disks/VolumeJBOD.h b/src/Disks/VolumeJBOD.h index ef6f215bf188..8d270a6c71c7 100644 --- a/src/Disks/VolumeJBOD.h +++ b/src/Disks/VolumeJBOD.h @@ -68,7 +68,7 @@ class VolumeJBOD : public IVolume struct DiskWithSize { DiskPtr disk; - uint64_t free_size = 0; + std::optional free_size = 0; DiskWithSize(DiskPtr disk_) : disk(disk_) @@ -80,7 +80,7 @@ class VolumeJBOD : public IVolume return free_size < rhs.free_size; } - ReservationPtr reserve(uint64_t bytes) + ReservationPtr reserve(UInt64 bytes) { ReservationPtr reservation = disk->reserve(bytes); if (!reservation) diff --git a/src/Functions/filesystem.cpp b/src/Functions/filesystem.cpp index 1eb1c27211c1..9fbf9b0cbe75 100644 --- a/src/Functions/filesystem.cpp +++ b/src/Functions/filesystem.cpp @@ -22,19 +22,19 @@ namespace struct FilesystemAvailable { static constexpr auto name = "filesystemAvailable"; - static std::uintmax_t get(const DiskPtr & disk) { return disk->getAvailableSpace(); } + static UInt64 get(const DiskPtr & disk) { return disk->getAvailableSpace().value_or(std::numeric_limits::max()); } }; struct FilesystemUnreserved { static constexpr auto name = "filesystemUnreserved"; - static std::uintmax_t get(const DiskPtr & disk) { return disk->getUnreservedSpace(); } + static UInt64 get(const DiskPtr & disk) { return disk->getUnreservedSpace().value_or(std::numeric_limits::max()); } }; struct FilesystemCapacity { static constexpr auto name = "filesystemCapacity"; - static std::uintmax_t get(const DiskPtr & disk) { return disk->getTotalSpace(); } + static UInt64 get(const DiskPtr & disk) { return disk->getTotalSpace().value_or(std::numeric_limits::max()); } }; template diff --git a/src/Interpreters/ServerAsynchronousMetrics.cpp b/src/Interpreters/ServerAsynchronousMetrics.cpp index e6e1a03f11c9..0fbcfc9e6a1c 100644 --- a/src/Interpreters/ServerAsynchronousMetrics.cpp +++ 
b/src/Interpreters/ServerAsynchronousMetrics.cpp @@ -191,14 +191,21 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values auto available = disk->getAvailableSpace(); auto unreserved = disk->getUnreservedSpace(); - new_values[fmt::format("DiskTotal_{}", name)] = { total, - "The total size in bytes of the disk (virtual filesystem). Remote filesystems can show a large value like 16 EiB." }; - new_values[fmt::format("DiskUsed_{}", name)] = { total - available, - "Used bytes on the disk (virtual filesystem). Remote filesystems not always provide this information." }; - new_values[fmt::format("DiskAvailable_{}", name)] = { available, - "Available bytes on the disk (virtual filesystem). Remote filesystems can show a large value like 16 EiB." }; - new_values[fmt::format("DiskUnreserved_{}", name)] = { unreserved, - "Available bytes on the disk (virtual filesystem) without the reservations for merges, fetches, and moves. Remote filesystems can show a large value like 16 EiB." }; + new_values[fmt::format("DiskTotal_{}", name)] = { *total, + "The total size in bytes of the disk (virtual filesystem). Remote filesystems may not provide this information." }; + + if (available) + { + new_values[fmt::format("DiskUsed_{}", name)] = { *total - *available, + "Used bytes on the disk (virtual filesystem). Remote filesystems not always provide this information." }; + + new_values[fmt::format("DiskAvailable_{}", name)] = { *available, + "Available bytes on the disk (virtual filesystem). Remote filesystems may not provide this information." }; + } + + if (unreserved) + new_values[fmt::format("DiskUnreserved_{}", name)] = { *unreserved, + "Available bytes on the disk (virtual filesystem) without the reservations for merges, fetches, and moves. Remote filesystems may not provide this information." 
}; } } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index e1da57744b3f..391b04573d79 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -111,11 +111,15 @@ bool MergeTreePartsMover::selectPartsForMove( { for (const auto & disk : volumes[i]->getDisks()) { - UInt64 required_maximum_available_space = static_cast(disk->getTotalSpace() * policy->getMoveFactor()); - UInt64 unreserved_space = disk->getUnreservedSpace(); - - if (unreserved_space < required_maximum_available_space && !disk->isBroken()) - need_to_move.emplace(disk, required_maximum_available_space - unreserved_space); + auto total_space = disk->getTotalSpace(); + auto unreserved_space = disk->getUnreservedSpace(); + if (total_space && unreserved_space) + { + UInt64 required_maximum_available_space = static_cast(*total_space * policy->getMoveFactor()); + + if (*unreserved_space < required_maximum_available_space && !disk->isBroken()) + need_to_move.emplace(disk, required_maximum_available_space - *unreserved_space); + } } } } diff --git a/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp index 002da7abd148..23a00cc7ae55 100644 --- a/src/Storages/System/StorageSystemDisks.cpp +++ b/src/Storages/System/StorageSystemDisks.cpp @@ -64,9 +64,9 @@ Pipe StorageSystemDisks::read( { col_name->insert(disk_name); col_path->insert(disk_ptr->getPath()); - col_free->insert(disk_ptr->getAvailableSpace()); - col_total->insert(disk_ptr->getTotalSpace()); - col_unreserved->insert(disk_ptr->getUnreservedSpace()); + col_free->insert(disk_ptr->getAvailableSpace().value_or(std::numeric_limits::max())); + col_total->insert(disk_ptr->getTotalSpace().value_or(std::numeric_limits::max())); + col_unreserved->insert(disk_ptr->getUnreservedSpace().value_or(std::numeric_limits::max())); col_keep->insert(disk_ptr->getKeepingFreeSpace()); auto data_source_description = disk_ptr->getDataSourceDescription(); col_type->insert(toString(data_source_description.type)); From e1bf96a786be0883993d2d9e8a5d2c1fcd89095c Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 22 May 2023 14:29:15 +0800 Subject: [PATCH 012/179] finish dev --- src/Functions/geohashEncode.cpp | 78 ++++++++++++++++++++++++++++----- 1 file changed, 67 insertions(+), 11 deletions(-) diff --git a/src/Functions/geohashEncode.cpp b/src/Functions/geohashEncode.cpp index bc0c8b8fc5f3..a05fa7fc8d6b 100644 --- a/src/Functions/geohashEncode.cpp +++ b/src/Functions/geohashEncode.cpp @@ -37,7 +37,7 @@ class FunctionGeohashEncode : public IFunction bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {2}; } + // ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {2}; } bool useDefaultImplementationForConstants() const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } @@ -59,7 +59,50 @@ class FunctionGeohashEncode : public IFunction } template - bool tryExecute(const IColumn * lon_column, const IColumn * lat_column, UInt64 precision_value, ColumnPtr & result) const + bool tryVectorVector(const IColumn * lon_column, const IColumn * lat_column, const IColumn * precision_column, ColumnPtr & result) const + { + const ColumnVector * longitude = checkAndGetColumn>(lon_column); + const 
ColumnVector * latitude = checkAndGetColumn>(lat_column); + if (!latitude || !longitude) + return false; + + auto col_str = ColumnString::create(); + ColumnString::Chars & out_vec = col_str->getChars(); + ColumnString::Offsets & out_offsets = col_str->getOffsets(); + + const size_t size = lat_column->size(); + + out_offsets.resize(size); + out_vec.resize(size * (GEOHASH_MAX_TEXT_LENGTH + 1)); + + char * begin = reinterpret_cast(out_vec.data()); + char * pos = begin; + + for (size_t i = 0; i < size; ++i) + { + const Float64 longitude_value = longitude->getElement(i); + const Float64 latitude_value = latitude->getElement(i); + const UInt64 precision_value = std::min(precision_column->get64(i), GEOHASH_MAX_TEXT_LENGTH); + + const size_t encoded_size = geohashEncode(longitude_value, latitude_value, precision_value, pos); + + pos += encoded_size; + *pos = '\0'; + out_offsets[i] = ++pos - begin; + } + out_vec.resize(pos - begin); + + if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Column size mismatch (internal logical error)"); + + result = std::move(col_str); + + return true; + + } + + template + bool tryVectorConstant(const IColumn * lon_column, const IColumn * lat_column, UInt64 precision_value, ColumnPtr & result) const { const ColumnVector * longitude = checkAndGetColumn>(lon_column); const ColumnVector * latitude = checkAndGetColumn>(lat_column); @@ -105,16 +148,29 @@ class FunctionGeohashEncode : public IFunction const IColumn * longitude = arguments[0].column.get(); const IColumn * latitude = arguments[1].column.get(); - const UInt64 precision_value = std::min(GEOHASH_MAX_TEXT_LENGTH, - arguments.size() == 3 ? arguments[2].column->get64(0) : GEOHASH_MAX_TEXT_LENGTH); - - ColumnPtr res_column; + if (arguments.size() < 3 || isColumnConst(*arguments[3].column)) + { + const UInt64 precision_value = std::min( + GEOHASH_MAX_TEXT_LENGTH, arguments.size() == 3 ? 
arguments[2].column->get64(0) : GEOHASH_MAX_TEXT_LENGTH); + + ColumnPtr res_column; + if (tryVectorConstant(longitude, latitude, precision_value, res_column) + || tryVectorConstant(longitude, latitude, precision_value, res_column) + || tryVectorConstant(longitude, latitude, precision_value, res_column) + || tryVectorConstant(longitude, latitude, precision_value, res_column)) + return res_column; + } + else + { + const IColumn * precision = arguments[2].column.get(); + ColumnPtr res_column; + if (tryVectorVector(longitude, latitude, precision, res_column) + || tryVectorVector(longitude, latitude, precision, res_column) + || tryVectorVector(longitude, latitude, precision, res_column) + || tryVectorVector(longitude, latitude, precision, res_column)) + return res_column; - if (tryExecute(longitude, latitude, precision_value, res_column) || - tryExecute(longitude, latitude, precision_value, res_column) || - tryExecute(longitude, latitude, precision_value, res_column) || - tryExecute(longitude, latitude, precision_value, res_column)) - return res_column; + } std::string arguments_description; for (size_t i = 0; i < arguments.size(); ++i) From 1f91a75b5472f3f1321aac9a76c3078880ba5dc9 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 22 May 2023 15:52:58 +0800 Subject: [PATCH 013/179] fix bugs of geoHashEncode --- src/Functions/geohashEncode.cpp | 108 +++++-------------------------- src/Storages/HDFS/HDFSCommon.cpp | 4 +- 2 files changed, 19 insertions(+), 93 deletions(-) diff --git a/src/Functions/geohashEncode.cpp b/src/Functions/geohashEncode.cpp index a05fa7fc8d6b..5f225a96c2bb 100644 --- a/src/Functions/geohashEncode.cpp +++ b/src/Functions/geohashEncode.cpp @@ -4,6 +4,7 @@ #include #include +#include #include @@ -58,57 +59,25 @@ class FunctionGeohashEncode : public IFunction return std::make_shared(); } - template - bool tryVectorVector(const IColumn * lon_column, const IColumn * lat_column, const IColumn * precision_column, ColumnPtr & result) const + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override { - const ColumnVector * longitude = checkAndGetColumn>(lon_column); - const ColumnVector * latitude = checkAndGetColumn>(lat_column); - if (!latitude || !longitude) - return false; - - auto col_str = ColumnString::create(); - ColumnString::Chars & out_vec = col_str->getChars(); - ColumnString::Offsets & out_offsets = col_str->getOffsets(); - - const size_t size = lat_column->size(); - - out_offsets.resize(size); - out_vec.resize(size * (GEOHASH_MAX_TEXT_LENGTH + 1)); - - char * begin = reinterpret_cast(out_vec.data()); - char * pos = begin; - - for (size_t i = 0; i < size; ++i) - { - const Float64 longitude_value = longitude->getElement(i); - const Float64 latitude_value = latitude->getElement(i); - const UInt64 precision_value = std::min(precision_column->get64(i), GEOHASH_MAX_TEXT_LENGTH); - - const size_t encoded_size = geohashEncode(longitude_value, latitude_value, precision_value, pos); - - pos += encoded_size; - *pos = '\0'; - out_offsets[i] = ++pos - begin; - } - out_vec.resize(pos - begin); - - if (!out_offsets.empty() && out_offsets.back() != out_vec.size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column size mismatch (internal logical error)"); - - result = std::move(col_str); + const IColumn * longitude = arguments[0].column.get(); + const IColumn * latitude = arguments[1].column.get(); - return true; + ColumnPtr precision; + if (arguments.size() < 3) + precision = 
DataTypeUInt8().createColumnConst(longitude->size(), GEOHASH_MAX_TEXT_LENGTH); + else + precision = arguments[2].column; + ColumnPtr res_column; + vector(longitude, latitude, precision.get(), res_column); + return res_column; } - template - bool tryVectorConstant(const IColumn * lon_column, const IColumn * lat_column, UInt64 precision_value, ColumnPtr & result) const +private: + void vector(const IColumn * lon_column, const IColumn * lat_column, const IColumn * precision_column, ColumnPtr & result) const { - const ColumnVector * longitude = checkAndGetColumn>(lon_column); - const ColumnVector * latitude = checkAndGetColumn>(lat_column); - if (!latitude || !longitude) - return false; - auto col_str = ColumnString::create(); ColumnString::Chars & out_vec = col_str->getChars(); ColumnString::Offsets & out_offsets = col_str->getOffsets(); @@ -123,8 +92,9 @@ class FunctionGeohashEncode : public IFunction for (size_t i = 0; i < size; ++i) { - const Float64 longitude_value = longitude->getElement(i); - const Float64 latitude_value = latitude->getElement(i); + const Float64 longitude_value = lon_column->getFloat64(i); + const Float64 latitude_value = lat_column->getFloat64(i); + const UInt64 precision_value = std::min(precision_column->get64(i), GEOHASH_MAX_TEXT_LENGTH); const size_t encoded_size = geohashEncode(longitude_value, latitude_value, precision_value, pos); @@ -138,50 +108,6 @@ class FunctionGeohashEncode : public IFunction throw Exception(ErrorCodes::LOGICAL_ERROR, "Column size mismatch (internal logical error)"); result = std::move(col_str); - - return true; - - } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override - { - const IColumn * longitude = arguments[0].column.get(); - const IColumn * latitude = arguments[1].column.get(); - - if (arguments.size() < 3 || isColumnConst(*arguments[3].column)) - { - const UInt64 precision_value = std::min( - GEOHASH_MAX_TEXT_LENGTH, arguments.size() == 3 ? 
arguments[2].column->get64(0) : GEOHASH_MAX_TEXT_LENGTH); - - ColumnPtr res_column; - if (tryVectorConstant(longitude, latitude, precision_value, res_column) - || tryVectorConstant(longitude, latitude, precision_value, res_column) - || tryVectorConstant(longitude, latitude, precision_value, res_column) - || tryVectorConstant(longitude, latitude, precision_value, res_column)) - return res_column; - } - else - { - const IColumn * precision = arguments[2].column.get(); - ColumnPtr res_column; - if (tryVectorVector(longitude, latitude, precision, res_column) - || tryVectorVector(longitude, latitude, precision, res_column) - || tryVectorVector(longitude, latitude, precision, res_column) - || tryVectorVector(longitude, latitude, precision, res_column)) - return res_column; - - } - - std::string arguments_description; - for (size_t i = 0; i < arguments.size(); ++i) - { - if (i != 0) - arguments_description += ", "; - arguments_description += arguments[i].column->getName(); - } - - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unsupported argument types: {} for function {}", - arguments_description, getName()); } }; diff --git a/src/Storages/HDFS/HDFSCommon.cpp b/src/Storages/HDFS/HDFSCommon.cpp index 932e80831fed..7b149518c0aa 100644 --- a/src/Storages/HDFS/HDFSCommon.cpp +++ b/src/Storages/HDFS/HDFSCommon.cpp @@ -38,8 +38,8 @@ HDFSFileInfo::~HDFSFileInfo() } -void HDFSBuilderWrapper::loadFromConfig(const Poco::Util::AbstractConfiguration & config, - const String & prefix, bool isUser) +void HDFSBuilderWrapper::loadFromConfig( + const Poco::Util::AbstractConfiguration & config, const String & prefix, [[maybe_unused]] bool isUser) { Poco::Util::AbstractConfiguration::Keys keys; From 39806657711f933c7e0d0fa04e8cc0e8cd769eaa Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 22 May 2023 15:58:28 +0800 Subject: [PATCH 014/179] fix uts --- tests/queries/0_stateless/00932_geohash_support.reference | 4 ++++ tests/queries/0_stateless/00932_geohash_support.sql | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00932_geohash_support.reference b/tests/queries/0_stateless/00932_geohash_support.reference index ffc290681c72..0dcb084eb6db 100644 --- a/tests/queries/0_stateless/00932_geohash_support.reference +++ b/tests/queries/0_stateless/00932_geohash_support.reference @@ -9,6 +9,10 @@ default precision: ezs42d000000 mixing const and non-const-columns: ezs42d000000 +ezs42d000000 +ezs42d000000 +ezs42d000000 +ezs42d000000 from table (with const precision): 1 6 Ok 1 6 Ok diff --git a/tests/queries/0_stateless/00932_geohash_support.sql b/tests/queries/0_stateless/00932_geohash_support.sql index aeed72176b94..89f8eba9ca23 100644 --- a/tests/queries/0_stateless/00932_geohash_support.sql +++ b/tests/queries/0_stateless/00932_geohash_support.sql @@ -24,7 +24,10 @@ select geohashEncode(-5.60302734375, 42.593994140625); select 'mixing const and non-const-columns:'; select geohashEncode(materialize(-5.60302734375), materialize(42.593994140625), 0); -select geohashEncode(materialize(-5.60302734375), materialize(42.593994140625), materialize(0)); -- { serverError 44 } +select geohashEncode(materialize(-5.60302734375), materialize(42.593994140625), materialize(0)); +select geohashEncode(-5.60302734375, materialize(42.593994140625), 0); +select geohashEncode(materialize(-5.60302734375), 42.593994140625, 0); +select geohashEncode(-5.60302734375, 42.593994140625, 0); select 'from table (with const precision):'; From 056e5824b57a78314b7ae565585ef0afea1bd836 Mon 
Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 22 May 2023 16:02:42 +0800 Subject: [PATCH 015/179] remove useless code --- src/Functions/geohashEncode.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Functions/geohashEncode.cpp b/src/Functions/geohashEncode.cpp index 5f225a96c2bb..ff61bf7d27c8 100644 --- a/src/Functions/geohashEncode.cpp +++ b/src/Functions/geohashEncode.cpp @@ -38,7 +38,6 @@ class FunctionGeohashEncode : public IFunction bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } - // ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {2}; } bool useDefaultImplementationForConstants() const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } From 87907dafa7a8179382c98cb1718b58a002617e08 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 24 May 2023 14:27:37 +0800 Subject: [PATCH 016/179] fix code style --- src/Functions/geohashEncode.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Functions/geohashEncode.cpp b/src/Functions/geohashEncode.cpp index ff61bf7d27c8..7c353b822aa0 100644 --- a/src/Functions/geohashEncode.cpp +++ b/src/Functions/geohashEncode.cpp @@ -17,7 +17,6 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; - extern const int ILLEGAL_COLUMN; extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; } From 7bd1c183ebe535ec3f8799e82d73f9b064c967c8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Jun 2023 13:16:02 +0300 Subject: [PATCH 017/179] Update test.py --- tests/integration/test_attach_table_normalizer/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index ddbb02bf4eff..ba0068e9c59c 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -24,7 +24,7 @@ def replace_substring_to_substr(node): "-c", "sed -i 's/substring/substr/g' /var/lib/clickhouse/metadata/default/file.sql", ], - user="root", + user="root" ) From ed318d10353101c76a4493ccd9fa6c239868abd3 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Wed, 14 Jun 2023 10:35:36 +0000 Subject: [PATCH 018/179] Add input_format_csv_ignore_extra_columns setting (prototype) --- src/Core/Settings.h | 1 + src/Formats/FormatFactory.cpp | 1 + src/Formats/FormatSettings.h | 1 + src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 15 ++++++++++++++- tests/queries/0_stateless/00301_csv.reference | 4 ++++ tests/queries/0_stateless/00301_csv.sh | 10 ++++++++++ 6 files changed, 31 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index bc879b9bdf6a..d38f7767252d 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -835,6 +835,7 @@ class IColumn; M(Bool, input_format_import_nested_json, false, "Map nested JSON data to nested tables (it works for JSONEachRow format).", 0) \ M(Bool, input_format_defaults_for_omitted_fields, true, "For input data calculate default expressions for omitted fields (it works for JSONEachRow, -WithNames, -WithNamesAndTypes formats).", IMPORTANT) \ M(Bool, input_format_csv_empty_as_default, true, "Treat empty fields in CSV input as default values.", 0) \ + M(Bool, input_format_csv_ignore_extra_columns, false, "", 0) \ M(Bool, input_format_tsv_empty_as_default, false, "Treat empty fields in 
TSV input as default values.", 0) \ M(Bool, input_format_tsv_enum_as_number, false, "Treat inserted enum values in TSV formats as enum indices.", 0) \ M(Bool, input_format_null_as_default, true, "Initialize null fields with default values if the data type of this field is not nullable and it is supported by the input format", 0) \ diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index c235afae57ee..0218d268c519 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -63,6 +63,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.delimiter = settings.format_csv_delimiter; format_settings.csv.tuple_delimiter = settings.format_csv_delimiter; format_settings.csv.empty_as_default = settings.input_format_csv_empty_as_default; + format_settings.csv.ignore_extra_columns = settings.input_format_csv_ignore_extra_columns; format_settings.csv.enum_as_number = settings.input_format_csv_enum_as_number; format_settings.csv.null_representation = settings.format_csv_null_representation; format_settings.csv.arrays_as_nested_csv = settings.input_format_csv_arrays_as_nested_csv; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 787c1a64759d..3bc53140fe54 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -128,6 +128,7 @@ struct FormatSettings bool allow_single_quotes = true; bool allow_double_quotes = true; bool empty_as_default = false; + bool ignore_extra_columns = false; bool crlf_end_of_line = false; bool enum_as_number = false; bool arrays_as_nested_csv = false; diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index ae75240e0eea..0cc5889b732b 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -302,14 +302,27 @@ bool CSVFormatReader::readField( return false; } + auto skip_all = [&]() + { + if (!is_last_file_column || !format_settings.csv.ignore_extra_columns) + { + return; + } + //std::cout << "skip !!!" << std::endl; + buf->position() = find_first_symbols<'\n'>(buf->position(), buf->buffer().end()); + }; if (format_settings.null_as_default && !isNullableOrLowCardinalityNullable(type)) { /// If value is null but type is not nullable then use default value instead. - return SerializationNullable::deserializeTextCSVImpl(column, *buf, format_settings, serialization); + bool res = SerializationNullable::deserializeTextCSVImpl(column, *buf, format_settings, serialization); + skip_all(); + return res; } /// Read the column normally. 
serialization->deserializeTextCSV(column, *buf, format_settings); + + skip_all(); return true; } diff --git a/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference index 9863da4b6407..61279f3b84a5 100644 --- a/tests/queries/0_stateless/00301_csv.reference +++ b/tests/queries/0_stateless/00301_csv.reference @@ -11,3 +11,7 @@ default-eof 1 2019-06-19 2016-01-01 01:02:03 NUL 2016-01-02 01:02:03 Nhello \N \N +Hello world 1 2016-01-01 +Hello world 2 2016-01-02 +Hello world 3 2016-01-03 +Hello world 4 2016-01-04 diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index b2618343dc0c..e99c39a0f6f0 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -37,3 +37,13 @@ echo 'NULL, NULL $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s NULLS LAST"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; + + +$CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64 DEFAULT 1, d Date DEFAULT '2019-06-19') ENGINE = Memory"; + +echo 'Hello world, 1, 2016-01-01 +Hello world, 2 ,2016-01-02, +Hello world, 3 ,2016-01-03, 2016-01-13 +Hello world, 4 ,2016-01-04, 2016-01-14, 2016-01-15' | $CLICKHOUSE_CLIENT --input_format_csv_empty_as_default=1 --input_format_csv_ignore_extra_columns=1 --query="INSERT INTO csv FORMAT CSV"; +$CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s, n"; +$CLICKHOUSE_CLIENT --query="DROP TABLE csv"; \ No newline at end of file From 2b40734900f121f60ad50e37c2c6fa2f9376e3d5 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Jun 2023 14:29:16 +0200 Subject: [PATCH 019/179] use const-size tasks in prefetch pool --- .../MergeTree/MergeTreePrefetchedReadPool.cpp | 105 +++++++----------- 1 file changed, 42 insertions(+), 63 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index 63a205a1a610..f0dd2123ca43 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -1,18 +1,18 @@ -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include #include -#include -#include +#include +#include #include #include -#include -#include namespace ProfileEvents @@ -296,31 +296,12 @@ MergeTreeReadTaskPtr MergeTreePrefetchedReadPool::getTask(size_t thread) return task; } -size_t MergeTreePrefetchedReadPool::getApproxSizeOfGranule(const IMergeTreeDataPart & part) const +size_t getApproximateSizeOfGranule(const IMergeTreeDataPart & part, const Names & columns_to_read) { - const auto & columns = part.getColumns(); - auto all_columns_are_fixed_size = columns.end() == std::find_if( - columns.begin(), columns.end(), - [](const auto & col){ return col.type->haveMaximumSizeOfValue() == false; }); - - if (all_columns_are_fixed_size) - { - size_t approx_size = 0; - for (const auto & col : columns) - approx_size += col.type->getMaximumSizeOfValueInMemory() * fixed_index_granularity; - - if (!index_granularity_bytes) - return approx_size; - - return std::min(index_granularity_bytes, approx_size); - } - - const size_t approx_size = static_cast(std::round(static_cast(part.getBytesOnDisk()) / part.getMarksCount())); - - if (!index_granularity_bytes) - return approx_size; - - return std::min(index_granularity_bytes, approx_size); + ColumnSize columns_size{}; + for (const auto & col_name : columns_to_read) + 
columns_size.add(part.getColumnSize(col_name)); + return columns_size.data_compressed / part.getMarksCount(); } MergeTreePrefetchedReadPool::PartsInfos MergeTreePrefetchedReadPool::getPartsInfos( @@ -347,7 +328,7 @@ MergeTreePrefetchedReadPool::PartsInfos MergeTreePrefetchedReadPool::getPartsInf for (const auto & range : part.ranges) part_info->sum_marks += range.end - range.begin; - part_info->approx_size_of_mark = getApproxSizeOfGranule(*part_info->data_part); + part_info->approx_size_of_mark = getApproximateSizeOfGranule(*part_info->data_part, column_names); const auto task_columns = getReadTaskColumns( part_reader_info, @@ -357,7 +338,7 @@ MergeTreePrefetchedReadPool::PartsInfos MergeTreePrefetchedReadPool::getPartsInf prewhere_info, actions_settings, reader_settings, - /*with_subcolumns=*/ true); + /* with_subcolumns */ true); part_info->size_predictor = !predict_block_size_bytes ? nullptr @@ -421,10 +402,6 @@ MergeTreePrefetchedReadPool::ThreadsTasks MergeTreePrefetchedReadPool::createThr } size_t min_prefetch_step_marks = 0; - if (settings.filesystem_prefetches_limit && settings.filesystem_prefetches_limit < sum_marks) - { - min_prefetch_step_marks = static_cast(std::round(static_cast(sum_marks) / settings.filesystem_prefetches_limit)); - } for (const auto & part : parts_infos) { @@ -437,12 +414,6 @@ MergeTreePrefetchedReadPool::ThreadsTasks MergeTreePrefetchedReadPool::createThr part->prefetch_step_marks = std::max( 1, static_cast(std::round(static_cast(settings.filesystem_prefetch_step_bytes) / part->approx_size_of_mark))); } - else - { - /// Experimentally derived ratio. - part->prefetch_step_marks = static_cast( - std::round(std::pow(std::max(1, static_cast(std::round(sum_marks / 1000))), double(1.5)))); - } /// This limit is important to avoid spikes of slow aws getObject requests when parallelizing within one file. /// (The default is taken from here https://docs.aws.amazon.com/whitepapers/latest/s3-optimizing-performance-best-practices/use-byte-range-fetches.html). @@ -450,13 +421,13 @@ MergeTreePrefetchedReadPool::ThreadsTasks MergeTreePrefetchedReadPool::createThr && settings.filesystem_prefetch_min_bytes_for_single_read_task && part->approx_size_of_mark < settings.filesystem_prefetch_min_bytes_for_single_read_task) { - - const size_t new_min_prefetch_step_marks = static_cast( + const size_t min_prefetch_step_marks_by_total_cols = static_cast( std::ceil(static_cast(settings.filesystem_prefetch_min_bytes_for_single_read_task) / part->approx_size_of_mark)); + /// At least one task to start working on it right now and another one to prefetch in the meantime. 
+ const size_t new_min_prefetch_step_marks = std::min(min_prefetch_step_marks_by_total_cols, sum_marks / threads / 2); if (min_prefetch_step_marks < new_min_prefetch_step_marks) { - LOG_TEST( - log, "Increasing min prefetch step from {} to {}", min_prefetch_step_marks, new_min_prefetch_step_marks); + LOG_DEBUG(log, "Increasing min prefetch step from {} to {}", min_prefetch_step_marks, new_min_prefetch_step_marks); min_prefetch_step_marks = new_min_prefetch_step_marks; } @@ -464,25 +435,33 @@ MergeTreePrefetchedReadPool::ThreadsTasks MergeTreePrefetchedReadPool::createThr if (part->prefetch_step_marks < min_prefetch_step_marks) { - LOG_TEST( - log, "Increasing prefetch step from {} to {} because of the prefetches limit {}", - part->prefetch_step_marks, min_prefetch_step_marks, settings.filesystem_prefetches_limit); + LOG_DEBUG(log, "Increasing prefetch step from {} to {}", part->prefetch_step_marks, min_prefetch_step_marks); part->prefetch_step_marks = min_prefetch_step_marks; } - LOG_TEST(log, - "Part: {}, sum_marks: {}, approx mark size: {}, prefetch_step_bytes: {}, prefetch_step_marks: {}, (ranges: {})", - part->data_part->name, part->sum_marks, part->approx_size_of_mark, - settings.filesystem_prefetch_step_bytes, part->prefetch_step_marks, toString(part->ranges)); + LOG_DEBUG( + log, + "Part: {}, sum_marks: {}, approx mark size: {}, prefetch_step_bytes: {}, prefetch_step_marks: {}, (ranges: {})", + part->data_part->name, + part->sum_marks, + part->approx_size_of_mark, + settings.filesystem_prefetch_step_bytes, + part->prefetch_step_marks, + toString(part->ranges)); } const size_t min_marks_per_thread = (sum_marks - 1) / threads + 1; LOG_DEBUG( log, - "Sum marks: {}, threads: {}, min_marks_per_thread: {}, result prefetch step marks: {}, prefetches limit: {}, total_size_approx: {}", - sum_marks, threads, min_marks_per_thread, settings.filesystem_prefetch_step_bytes, settings.filesystem_prefetches_limit, total_size_approx); + "Sum marks: {}, threads: {}, min_marks_per_thread: {}, min prefetch step marks: {}, prefetches limit: {}, total_size_approx: {}", + sum_marks, + threads, + min_marks_per_thread, + min_prefetch_step_marks, + settings.filesystem_prefetches_limit, + total_size_approx); size_t allowed_memory_usage = settings.filesystem_prefetch_max_memory_usage; if (!allowed_memory_usage) @@ -492,6 +471,7 @@ MergeTreePrefetchedReadPool::ThreadsTasks MergeTreePrefetchedReadPool::createThr : std::nullopt; ThreadsTasks result_threads_tasks; + size_t total_tasks = 0; for (size_t i = 0, part_idx = 0; i < threads && part_idx < parts_infos.size(); ++i) { auto need_marks = min_marks_per_thread; @@ -606,12 +586,11 @@ MergeTreePrefetchedReadPool::ThreadsTasks MergeTreePrefetchedReadPool::createThr ++priority.value; result_threads_tasks[i].push_back(std::move(read_task)); + ++total_tasks; } } - LOG_TEST( - log, "Result tasks {} for {} threads: {}", - result_threads_tasks.size(), threads, dumpTasks(result_threads_tasks)); + LOG_TEST(log, "Result tasks {} for {} threads: {}", total_tasks, threads, dumpTasks(result_threads_tasks)); return result_threads_tasks; } From e88fc3989534986e78561a967a9263eda7548d3f Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Jun 2023 14:32:09 +0200 Subject: [PATCH 020/179] cosmetics --- .../IO/AsynchronousBoundedReadBuffer.cpp | 23 +++++++++++-------- .../IO/CachedOnDiskReadBufferFromFile.cpp | 4 ++++ 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp 
index f9bd68222ae0..6651658e156a 100644 --- a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp +++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp @@ -33,6 +33,15 @@ namespace ProfileEvents extern const Event RemoteFSBuffers; } +namespace +{ +size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size) +{ + /// Buffers used for prefetch or pre-download better to have enough size, but not bigger than the whole file. + return std::min(std::max(settings.prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE), file_size); +} +} + namespace DB { @@ -42,23 +51,17 @@ namespace ErrorCodes extern const int ARGUMENT_OUT_OF_BOUND; } -static size_t chooseBufferSize(const ReadSettings & settings, size_t file_size) -{ - /// Buffers used for prefetch or pre-download better to have enough size, but not bigger than the whole file. - return std::min(std::max(settings.prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE), file_size); -} - AsynchronousBoundedReadBuffer::AsynchronousBoundedReadBuffer( ImplPtr impl_, IAsynchronousReader & reader_, const ReadSettings & settings_, AsyncReadCountersPtr async_read_counters_, FilesystemReadPrefetchesLogPtr prefetches_log_) - : ReadBufferFromFileBase(chooseBufferSize(settings_, impl_->getFileSize()), nullptr, 0) + : ReadBufferFromFileBase(chooseBufferSizeForRemoteReading(settings_, impl_->getFileSize()), nullptr, 0) , impl(std::move(impl_)) , read_settings(settings_) , reader(reader_) - , prefetch_buffer(chooseBufferSize(settings_, impl->getFileSize())) + , prefetch_buffer(chooseBufferSizeForRemoteReading(read_settings, impl->getFileSize())) , query_id(CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() != nullptr ? CurrentThread::getQueryId() : "") , current_reader_id(getRandomASCIIString(8)) , log(&Poco::Logger::get("AsynchronousBoundedReadBuffer")) @@ -111,7 +114,7 @@ void AsynchronousBoundedReadBuffer::prefetch(Priority priority) last_prefetch_info.submit_time = std::chrono::system_clock::now(); last_prefetch_info.priority = priority; - chassert(prefetch_buffer.size() == chooseBufferSize(read_settings, impl->getFileSize())); + chassert(prefetch_buffer.size() == chooseBufferSizeForRemoteReading(read_settings, impl->getFileSize())); prefetch_future = asyncReadInto(prefetch_buffer.data(), prefetch_buffer.size(), priority); ProfileEvents::increment(ProfileEvents::RemoteFSPrefetches); } @@ -190,7 +193,7 @@ bool AsynchronousBoundedReadBuffer::nextImpl() { ProfileEventTimeIncrement watch(ProfileEvents::SynchronousRemoteReadWaitMicroseconds); - chassert(memory.size() == chooseBufferSize(read_settings, impl->getFileSize())); + chassert(memory.size() == chooseBufferSizeForRemoteReading(read_settings, impl->getFileSize())); std::tie(size, offset) = impl->readInto(memory.data(), memory.size(), file_offset_of_buffer_end, bytes_to_ignore); ProfileEvents::increment(ProfileEvents::RemoteFSUnprefetchedReads); diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 6317aba20e90..bfde6d0984c8 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -1085,6 +1085,10 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() first_offset, file_segments->toString()); + /// Release buffer a little bit earlier. 
+ if (read_until_position == file_offset_of_buffer_end) + implementation_buffer.reset(); + return result; } From 1d33043fe673d5ebc86b68fbbdb563c1cbcdbb0f Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Jun 2023 16:18:47 +0200 Subject: [PATCH 021/179] changes around buffer sizes --- .../IO/AsynchronousBoundedReadBuffer.cpp | 9 ---- src/Disks/IO/ReadBufferFromRemoteFSGather.cpp | 41 ++++++++++++++----- src/Disks/IO/ReadBufferFromRemoteFSGather.h | 1 + 3 files changed, 31 insertions(+), 20 deletions(-) diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp index 6651658e156a..86ee541dcbd5 100644 --- a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp +++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp @@ -33,15 +33,6 @@ namespace ProfileEvents extern const Event RemoteFSBuffers; } -namespace -{ -size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size) -{ - /// Buffers used for prefetch or pre-download better to have enough size, but not bigger than the whole file. - return std::min(std::max(settings.prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE), file_size); -} -} - namespace DB { diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index eb9c509e4597..537c0cf1be7d 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -2,15 +2,28 @@ #include +#include #include #include -#include +#include #include -#include -#include #include +#include +#include + +using namespace DB; +namespace +{ +bool withCache(const ReadSettings & settings) +{ + return settings.remote_fs_cache && settings.enable_filesystem_cache + && (!CurrentThread::getQueryId().empty() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache + || !settings.avoid_readthrough_cache_outside_query_context); +} +} + namespace DB { namespace ErrorCodes @@ -18,29 +31,35 @@ namespace ErrorCodes extern const int CANNOT_SEEK_THROUGH_FILE; } +size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size) +{ + /// Only when cache is used we could download bigger portions of FileSegments than what we actually gonna read within particular task. + if (!withCache(settings)) + return settings.remote_fs_buffer_size; + + /// Buffers used for prefetch and pre-download better to have enough size, but not bigger than the whole file. + return std::min(std::max(settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE), file_size); +} + ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather( ReadBufferCreator && read_buffer_creator_, const StoredObjects & blobs_to_read_, const ReadSettings & settings_, std::shared_ptr cache_log_, bool use_external_buffer_) - : ReadBufferFromFileBase(use_external_buffer_ ? 0 : settings_.remote_fs_buffer_size, nullptr, 0) + : ReadBufferFromFileBase( + use_external_buffer_ ? 0 : chooseBufferSizeForRemoteReading(settings_, getTotalSize(blobs_to_read_)), nullptr, 0) , settings(settings_) , blobs_to_read(blobs_to_read_) , read_buffer_creator(std::move(read_buffer_creator_)) , cache_log(settings.enable_filesystem_cache_log ? cache_log_ : nullptr) - , query_id(CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() != nullptr ? 
CurrentThread::getQueryId() : "") + , query_id(CurrentThread::getQueryId()) , use_external_buffer(use_external_buffer_) + , with_cache(withCache(settings)) , log(&Poco::Logger::get("ReadBufferFromRemoteFSGather")) { if (!blobs_to_read.empty()) current_object = blobs_to_read.front(); - - with_cache = settings.remote_fs_cache - && settings.enable_filesystem_cache - && (!query_id.empty() - || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache - || !settings.avoid_readthrough_cache_outside_query_context); } SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(const StoredObject & object) diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index 272ed2b3ac10..9bf55ab69ce9 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -86,4 +86,5 @@ friend class ReadIndirectBufferFromRemoteFS; Poco::Logger * log; }; +size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size); } From 1dddcc94726bfca062da2af1b9880df5fa5e4268 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Jun 2023 16:19:05 +0200 Subject: [PATCH 022/179] use connection pool --- src/Common/PoolBase.h | 88 +++++++++++++------- src/Disks/ObjectStorages/S3/diskSettings.cpp | 3 + src/IO/HTTPCommon.cpp | 59 ++++++++++--- src/IO/HTTPCommon.h | 16 +++- src/IO/ReadBufferFromS3.cpp | 50 ++++++++++- src/IO/ReadBufferFromS3.h | 6 +- src/IO/S3/PocoHTTPClient.cpp | 40 +++++++-- src/IO/S3/PocoHTTPClient.h | 25 ++++++ src/IO/S3/SessionAwareIOStream.h | 4 + 9 files changed, 239 insertions(+), 52 deletions(-) diff --git a/src/Common/PoolBase.h b/src/Common/PoolBase.h index 8cabb472d8fc..5575b56f2999 100644 --- a/src/Common/PoolBase.h +++ b/src/Common/PoolBase.h @@ -1,9 +1,11 @@ #pragma once -#include #include -#include +#include +#include +#include #include +#include #include #include @@ -15,14 +17,6 @@ namespace ProfileEvents extern const Event ConnectionPoolIsFullMicroseconds; } -namespace DB -{ - namespace ErrorCodes - { - extern const int LOGICAL_ERROR; - } -} - /** A class from which you can inherit and get a pool of something. Used for database connection pools. * Descendant class must provide a method for creating a new object to place in the pool. */ @@ -35,6 +29,22 @@ class PoolBase : private boost::noncopyable using ObjectPtr = std::shared_ptr; using Ptr = std::shared_ptr>; + enum class BehaviourOnLimit + { + /** + * Default behaviour - when limit on pool size is reached, callers will wait until object will be returned back in pool. + */ + Wait, + + /** + * If no free objects in pool - allocate a new object, but not store it in pool. + * This behaviour is needed when we simply don't want to waste time waiting or if we cannot guarantee that query could be processed using fixed amount of connections. + * For example, when we read from table on s3, one GetObject request corresponds to the whole FileSystemCache segment. This segments are shared between different + * reading tasks, so in general case connection could be taken from pool by one task and returned back by another one. And these tasks are processed completely independently. + */ + AllocateNewBypassingPool, + }; + private: /** The object with the flag, whether it is currently used. 
*/ @@ -89,37 +99,53 @@ class PoolBase : private boost::noncopyable Object & operator*() && = delete; const Object & operator*() const && = delete; - Object * operator->() & { return &*data->data.object; } - const Object * operator->() const & { return &*data->data.object; } - Object & operator*() & { return *data->data.object; } - const Object & operator*() const & { return *data->data.object; } + Object * operator->() & { return castToObjectPtr(); } + const Object * operator->() const & { return castToObjectPtr(); } + Object & operator*() & { return *castToObjectPtr(); } + const Object & operator*() const & { return *castToObjectPtr(); } /** * Expire an object to make it reallocated later. */ void expire() { - data->data.is_expired = true; + if (data.index() == 1) + std::get<1>(data)->data.is_expired = true; } - bool isNull() const { return data == nullptr; } - - PoolBase * getPool() const - { - if (!data) - throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Attempt to get pool from uninitialized entry"); - return &data->data.pool; - } + bool isNull() const { return data.index() == 0 ? !std::get<0>(data) : !std::get<1>(data); } private: - std::shared_ptr data; + /** + * Plain object will be stored instead of PoolEntryHelper if fallback was made in get() (see BehaviourOnLimit::AllocateNewBypassingPool). + */ + std::variant> data; + + explicit Entry(ObjectPtr && object) : data(std::move(object)) { } + + explicit Entry(PooledObject & object) : data(std::make_shared(object)) { } - explicit Entry(PooledObject & object) : data(std::make_shared(object)) {} + auto castToObjectPtr() const + { + return std::visit( + [](const auto & ptr) + { + using T = std::decay_t; + if constexpr (std::is_same_v) + return ptr.get(); + else + return ptr->data.object.get(); + }, + data); + } }; virtual ~PoolBase() = default; - /** Allocates the object. Wait for free object in pool for 'timeout'. With 'timeout' < 0, the timeout is infinite. */ + /** Allocates the object. + * If 'behaviour_on_limit' is Wait - wait for free object in pool for 'timeout'. With 'timeout' < 0, the timeout is infinite. + * If 'behaviour_on_limit' is AllocateNewBypassingPool and there is no free object - a new object will be created but not stored in the pool. + */ Entry get(Poco::Timespan::TimeDiff timeout) { std::unique_lock lock(mutex); @@ -150,6 +176,9 @@ class PoolBase : private boost::noncopyable return Entry(*items.back()); } + if (behaviour_on_limit == BehaviourOnLimit::AllocateNewBypassingPool) + return Entry(allocObject()); + Stopwatch blocked; if (timeout < 0) { @@ -184,6 +213,8 @@ class PoolBase : private boost::noncopyable /** The maximum size of the pool. */ unsigned max_items; + BehaviourOnLimit behaviour_on_limit; + /** Pool. 
*/ Objects items; @@ -192,11 +223,10 @@ class PoolBase : private boost::noncopyable std::condition_variable available; protected: - Poco::Logger * log; - PoolBase(unsigned max_items_, Poco::Logger * log_) - : max_items(max_items_), log(log_) + PoolBase(unsigned max_items_, Poco::Logger * log_, BehaviourOnLimit behaviour_on_limit_ = BehaviourOnLimit::Wait) + : max_items(max_items_), behaviour_on_limit(behaviour_on_limit_), log(log_) { items.reserve(max_items); } diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index 409eb2a3dc3f..fe57fb24bbd1 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -132,6 +132,9 @@ std::unique_ptr getClient( client_configuration.requestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 30000); client_configuration.maxConnections = config.getUInt(config_prefix + ".max_connections", 100); client_configuration.endpointOverride = uri.endpoint; + client_configuration.http_keep_alive_timeout_ms = config.getUInt(config_prefix + ".http_keep_alive_timeout_ms", 10000); + client_configuration.http_connection_pool_size = config.getUInt(config_prefix + ".http_connection_pool_size", 1000); + client_configuration.wait_on_pool_size_limit = false; auto proxy_config = getProxyConfiguration(config_prefix, config); if (proxy_config) diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 3ec9b3d0a835..f3e2064c8bfa 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -131,8 +131,12 @@ namespace UInt16 proxy_port_, bool proxy_https_, size_t max_pool_size_, - bool resolve_host_ = true) - : Base(static_cast(max_pool_size_), &Poco::Logger::get("HTTPSessionPool")) + bool resolve_host_, + bool wait_on_pool_size_limit) + : Base( + static_cast(max_pool_size_), + &Poco::Logger::get("HTTPSessionPool"), + wait_on_pool_size_limit ? 
BehaviourOnLimit::Wait : BehaviourOnLimit::AllocateNewBypassingPool) , host(host_) , port(port_) , https(https_) @@ -155,11 +159,12 @@ namespace String proxy_host; UInt16 proxy_port; bool is_proxy_https; + bool wait_on_pool_size_limit; bool operator ==(const Key & rhs) const { - return std::tie(target_host, target_port, is_target_https, proxy_host, proxy_port, is_proxy_https) - == std::tie(rhs.target_host, rhs.target_port, rhs.is_target_https, rhs.proxy_host, rhs.proxy_port, rhs.is_proxy_https); + return std::tie(target_host, target_port, is_target_https, proxy_host, proxy_port, is_proxy_https, wait_on_pool_size_limit) + == std::tie(rhs.target_host, rhs.target_port, rhs.is_target_https, rhs.proxy_host, rhs.proxy_port, rhs.is_proxy_https, rhs.wait_on_pool_size_limit); } }; @@ -178,6 +183,7 @@ namespace s.update(k.proxy_host); s.update(k.proxy_port); s.update(k.is_proxy_https); + s.update(k.wait_on_pool_size_limit); return s.get64(); } }; @@ -218,14 +224,14 @@ namespace const Poco::URI & proxy_uri, const ConnectionTimeouts & timeouts, size_t max_connections_per_endpoint, - bool resolve_host = true) + bool resolve_host, + bool wait_on_pool_size_limit) { - std::lock_guard lock(mutex); + std::unique_lock lock(mutex); const std::string & host = uri.getHost(); UInt16 port = uri.getPort(); bool https = isHTTPS(uri); - String proxy_host; UInt16 proxy_port = 0; bool proxy_https = false; @@ -236,11 +242,27 @@ namespace proxy_https = isHTTPS(proxy_uri); } - HTTPSessionPool::Key key{host, port, https, proxy_host, proxy_port, proxy_https}; + HTTPSessionPool::Key key{host, port, https, proxy_host, proxy_port, proxy_https, wait_on_pool_size_limit}; auto pool_ptr = endpoints_pool.find(key); if (pool_ptr == endpoints_pool.end()) std::tie(pool_ptr, std::ignore) = endpoints_pool.emplace( - key, std::make_shared(host, port, https, proxy_host, proxy_port, proxy_https, max_connections_per_endpoint, resolve_host)); + key, + std::make_shared( + host, + port, + https, + proxy_host, + proxy_port, + proxy_https, + max_connections_per_endpoint, + resolve_host, + wait_on_pool_size_limit)); + + /// Some routines held session objects until the end of its lifetime. Also this routines may create another sessions in this time frame. + /// If some other session holds `lock` because it waits on another lock inside `pool_ptr->second->get` it isn't possible to create any + /// new session and thus finish routine, return session to the pool and unlock the thread waiting inside `pool_ptr->second->get`. + /// To avoid such a deadlock we unlock `lock` before entering `pool_ptr->second->get`. 
+ lock.unlock(); auto retry_timeout = timeouts.connection_timeout.totalMicroseconds(); auto session = pool_ptr->second->get(retry_timeout); @@ -295,14 +317,25 @@ HTTPSessionPtr makeHTTPSession(const Poco::URI & uri, const ConnectionTimeouts & } -PooledHTTPSessionPtr makePooledHTTPSession(const Poco::URI & uri, const ConnectionTimeouts & timeouts, size_t per_endpoint_pool_size, bool resolve_host) +PooledHTTPSessionPtr makePooledHTTPSession( + const Poco::URI & uri, + const ConnectionTimeouts & timeouts, + size_t per_endpoint_pool_size, + bool resolve_host, + bool wait_on_pool_size_limit) { - return makePooledHTTPSession(uri, {}, timeouts, per_endpoint_pool_size, resolve_host); + return makePooledHTTPSession(uri, {}, timeouts, per_endpoint_pool_size, resolve_host, wait_on_pool_size_limit); } -PooledHTTPSessionPtr makePooledHTTPSession(const Poco::URI & uri, const Poco::URI & proxy_uri, const ConnectionTimeouts & timeouts, size_t per_endpoint_pool_size, bool resolve_host) +PooledHTTPSessionPtr makePooledHTTPSession( + const Poco::URI & uri, + const Poco::URI & proxy_uri, + const ConnectionTimeouts & timeouts, + size_t per_endpoint_pool_size, + bool resolve_host, + bool wait_on_pool_size_limit) { - return HTTPSessionPool::instance().getSession(uri, proxy_uri, timeouts, per_endpoint_pool_size, resolve_host); + return HTTPSessionPool::instance().getSession(uri, proxy_uri, timeouts, per_endpoint_pool_size, resolve_host, wait_on_pool_size_limit); } bool isRedirect(const Poco::Net::HTTPResponse::HTTPStatus status) { return status == Poco::Net::HTTPResponse::HTTP_MOVED_PERMANENTLY || status == Poco::Net::HTTPResponse::HTTP_FOUND || status == Poco::Net::HTTPResponse::HTTP_SEE_OTHER || status == Poco::Net::HTTPResponse::HTTP_TEMPORARY_REDIRECT; } diff --git a/src/IO/HTTPCommon.h b/src/IO/HTTPCommon.h index 3616a33c1c74..db8fc2a2a40b 100644 --- a/src/IO/HTTPCommon.h +++ b/src/IO/HTTPCommon.h @@ -61,8 +61,20 @@ void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_ HTTPSessionPtr makeHTTPSession(const Poco::URI & uri, const ConnectionTimeouts & timeouts, bool resolve_host = true); /// As previous method creates session, but tooks it from pool, without and with proxy uri. 
-PooledHTTPSessionPtr makePooledHTTPSession(const Poco::URI & uri, const ConnectionTimeouts & timeouts, size_t per_endpoint_pool_size, bool resolve_host = true); -PooledHTTPSessionPtr makePooledHTTPSession(const Poco::URI & uri, const Poco::URI & proxy_uri, const ConnectionTimeouts & timeouts, size_t per_endpoint_pool_size, bool resolve_host = true); +PooledHTTPSessionPtr makePooledHTTPSession( + const Poco::URI & uri, + const ConnectionTimeouts & timeouts, + size_t per_endpoint_pool_size, + bool resolve_host = true, + bool wait_on_pool_size_limit = true); + +PooledHTTPSessionPtr makePooledHTTPSession( + const Poco::URI & uri, + const Poco::URI & proxy_uri, + const ConnectionTimeouts & timeouts, + size_t per_endpoint_pool_size, + bool resolve_host = true, + bool wait_on_pool_size_limit = true); bool isRedirect(Poco::Net::HTTPResponse::HTTPStatus status); diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index d1cb1ec9ab03..364253ba7466 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -31,6 +31,23 @@ namespace ProfileEvents extern const Event RemoteReadThrottlerSleepMicroseconds; } +namespace +{ +void resetSession(Aws::S3::Model::GetObjectResult & read_result) +{ + if (auto * session_aware_stream = dynamic_cast *>(&read_result.GetBody())) + { + auto & session + = static_cast(*static_cast(session_aware_stream->getSession())); + session.reset(); + } + else if (!dynamic_cast *>(&read_result.GetBody())) + { + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Session of unexpected type encountered"); + } +} +} + namespace DB { namespace ErrorCodes @@ -74,7 +91,10 @@ bool ReadBufferFromS3::nextImpl() if (read_until_position) { if (read_until_position == offset) + { + read_all_range_successfully = true; return false; + } if (read_until_position < offset) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read beyond right offset ({} > {})", offset, read_until_position - 1); @@ -154,7 +174,10 @@ bool ReadBufferFromS3::nextImpl() } if (!next_result) + { + read_all_range_successfully = true; return false; + } BufferBase::set(impl->buffer().begin(), impl->buffer().size(), impl->offset()); @@ -240,6 +263,8 @@ off_t ReadBufferFromS3::seek(off_t offset_, int whence) if (offset_ == getPosition() && whence == SEEK_SET) return offset_; + read_all_range_successfully = false; + if (impl && restricted_seek) { throw Exception( @@ -312,6 +337,8 @@ void ReadBufferFromS3::setReadUntilPosition(size_t position) { if (position != static_cast(read_until_position)) { + read_all_range_successfully = false; + if (impl) { if (!atEndOfRequestedRangeGuess()) @@ -328,6 +355,8 @@ void ReadBufferFromS3::setReadUntilEnd() { if (read_until_position) { + read_all_range_successfully = false; + read_until_position = 0; if (impl) { @@ -351,8 +380,27 @@ bool ReadBufferFromS3::atEndOfRequestedRangeGuess() return false; } +ReadBufferFromS3::~ReadBufferFromS3() +{ + try + { + if (!read_all_range_successfully && read_result) + /// When we abandon a session with an ongoing GetObject request and there is another one trying to delete the same object this delete + /// operation will hang until GetObject's session idle timeouts. So we have to call `reset()` on GetObject's session session immediately. + resetSession(*read_result); + } + catch (...) 
+ { + tryLogCurrentException(log); + } +} + std::unique_ptr ReadBufferFromS3::initialize() { + if (!read_all_range_successfully && read_result) + resetSession(*read_result); + read_all_range_successfully = false; + /** * If remote_filesystem_read_method = 'threadpool', then for MergeTree family tables * exact byte ranges to read are always passed here. @@ -363,7 +411,7 @@ std::unique_ptr ReadBufferFromS3::initialize() read_result = sendRequest(offset, read_until_position ? std::make_optional(read_until_position - 1) : std::nullopt); size_t buffer_size = use_external_buffer ? 0 : read_settings.remote_fs_buffer_size; - return std::make_unique(read_result.GetBody(), buffer_size); + return std::make_unique(read_result->GetBody(), buffer_size); } Aws::S3::Model::GetObjectResult ReadBufferFromS3::sendRequest(size_t range_begin, std::optional range_end_incl) const diff --git a/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h index 0f665861a1ea..11299aa2c2a8 100644 --- a/src/IO/ReadBufferFromS3.h +++ b/src/IO/ReadBufferFromS3.h @@ -41,7 +41,7 @@ class ReadBufferFromS3 : public ReadBufferFromFileBase std::atomic offset = 0; std::atomic read_until_position = 0; - Aws::S3::Model::GetObjectResult read_result; + std::optional read_result; std::unique_ptr impl; Poco::Logger * log = &Poco::Logger::get("ReadBufferFromS3"); @@ -60,6 +60,8 @@ class ReadBufferFromS3 : public ReadBufferFromFileBase bool restricted_seek_ = false, std::optional file_size = std::nullopt); + ~ReadBufferFromS3() override; + bool nextImpl() override; off_t seek(off_t off, int whence) override; @@ -100,6 +102,8 @@ class ReadBufferFromS3 : public ReadBufferFromFileBase /// There is different seek policy for disk seek and for non-disk seek /// (non-disk seek is applied for seekable input formats: orc, arrow, parquet). bool restricted_seek; + + bool read_all_range_successfully = false; }; } diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index bfda71493436..754b1bfd5b80 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -1,3 +1,4 @@ +#include #include "Common/DNSResolver.h" #include "config.h" @@ -138,8 +139,9 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & client_config , timeouts(ConnectionTimeouts( Poco::Timespan(client_configuration.connectTimeoutMs * 1000), /// connection timeout. Poco::Timespan(client_configuration.requestTimeoutMs * 1000), /// send timeout. - Poco::Timespan(client_configuration.requestTimeoutMs * 1000) /// receive timeout. - )) + Poco::Timespan(client_configuration.requestTimeoutMs * 1000), /// receive timeout. + Poco::Timespan(client_configuration.enableTcpKeepAlive ? 
client_configuration.tcpKeepAliveIntervalMs * 1000 : 0), + Poco::Timespan(client_configuration.http_keep_alive_timeout_ms * 1000))) /// flag indicating whether keep-alive is enabled is set to each session upon creation , remote_host_filter(client_configuration.remote_host_filter) , s3_max_redirects(client_configuration.s3_max_redirects) , enable_s3_requests_logging(client_configuration.enable_s3_requests_logging) @@ -147,6 +149,8 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & client_config , get_request_throttler(client_configuration.get_request_throttler) , put_request_throttler(client_configuration.put_request_throttler) , extra_headers(client_configuration.extra_headers) + , http_connection_pool_size(client_configuration.http_connection_pool_size) + , wait_on_pool_size_limit(client_configuration.wait_on_pool_size_limit) { } @@ -254,9 +258,26 @@ void PocoHTTPClient::addMetric(const Aws::Http::HttpRequest & request, S3MetricT void PocoHTTPClient::makeRequestInternal( Aws::Http::HttpRequest & request, std::shared_ptr & response, + Aws::Utils::RateLimits::RateLimiterInterface * readLimiter , + Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const +{ + const auto request_configuration = per_request_configuration(request); + if (http_connection_pool_size && request_configuration.proxy_host.empty()) + makeRequestInternalImpl(request, request_configuration, response, readLimiter, writeLimiter); + else + makeRequestInternalImpl(request, request_configuration, response, readLimiter, writeLimiter); +} + +template +void PocoHTTPClient::makeRequestInternalImpl( + Aws::Http::HttpRequest & request, + const ClientConfigurationPerRequest & request_configuration, + std::shared_ptr & response, Aws::Utils::RateLimits::RateLimiterInterface *, Aws::Utils::RateLimits::RateLimiterInterface *) const { + using SessionPtr = std::conditional_t; + Poco::Logger * log = &Poco::Logger::get("AWSClient"); auto uri = request.GetUri().GetURIString(); @@ -303,8 +324,7 @@ void PocoHTTPClient::makeRequestInternal( for (unsigned int attempt = 0; attempt <= s3_max_redirects; ++attempt) { Poco::URI target_uri(uri); - HTTPSessionPtr session; - auto request_configuration = per_request_configuration(request); + SessionPtr session; if (!request_configuration.proxy_host.empty()) { @@ -313,7 +333,11 @@ void PocoHTTPClient::makeRequestInternal( /// Reverse proxy can replace host header with resolved ip address instead of host name. /// This can lead to request signature difference on S3 side. 
- session = makeHTTPSession(target_uri, timeouts, /* resolve_host = */ false); + if constexpr (pooled) + session = makePooledHTTPSession( + target_uri, timeouts, http_connection_pool_size, /* resolve_host = */ true, wait_on_pool_size_limit); + else + session = makeHTTPSession(target_uri, timeouts, /* resolve_host = */ false); bool use_tunnel = request_configuration.proxy_scheme == Aws::Http::Scheme::HTTP && target_uri.getScheme() == "https"; session->setProxy( @@ -325,7 +349,11 @@ void PocoHTTPClient::makeRequestInternal( } else { - session = makeHTTPSession(target_uri, timeouts, /* resolve_host = */ true); + if constexpr (pooled) + session = makePooledHTTPSession( + target_uri, timeouts, http_connection_pool_size, /* resolve_host = */ true, wait_on_pool_size_limit); + else + session = makeHTTPSession(target_uri, timeouts, /* resolve_host = */ false); } /// In case of error this address will be written to logs diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 762178a93655..92d3d5c57478 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -53,6 +53,13 @@ struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration ThrottlerPtr put_request_throttler; HTTPHeaderEntries extra_headers; + /// Not a client parameter in terms of HTTP and we won't send it to the server. Used internally to determine when connection have to be re-established. + uint32_t http_keep_alive_timeout_ms = 0; + /// Zero means pooling will not be used. + size_t http_connection_pool_size = 0; + /// See PoolBase::BehaviourOnLimit + bool wait_on_pool_size_limit = true; + void updateSchemeAndRegion(); std::function error_report; @@ -90,6 +97,12 @@ class PocoHTTPResponse : public Aws::Http::Standard::StandardHttpResponse ); } + void SetResponseBody(Aws::IStream & incoming_stream, PooledHTTPSessionPtr & session_) /// NOLINT + { + body_stream = Aws::Utils::Stream::ResponseStream( + Aws::New>("http result streambuf", session_, incoming_stream.rdbuf())); + } + void SetResponseBody(std::string & response_body) /// NOLINT { auto stream = Aws::New("http result buf", response_body); // STYLE_CHECK_ALLOW_STD_STRING_STREAM @@ -149,6 +162,15 @@ class PocoHTTPClient : public Aws::Http::HttpClient EnumSize, }; + template + void makeRequestInternalImpl( + Aws::Http::HttpRequest & request, + const ClientConfigurationPerRequest & per_request_configuration, + std::shared_ptr & response, + Aws::Utils::RateLimits::RateLimiterInterface * readLimiter, + Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const; + +protected: static S3MetricKind getMetricKind(const Aws::Http::HttpRequest & request); void addMetric(const Aws::Http::HttpRequest & request, S3MetricType type, ProfileEvents::Count amount = 1) const; @@ -170,6 +192,9 @@ class PocoHTTPClient : public Aws::Http::HttpClient ThrottlerPtr put_request_throttler; const HTTPHeaderEntries extra_headers; + + size_t http_connection_pool_size = 0; + bool wait_on_pool_size_limit = true; }; } diff --git a/src/IO/S3/SessionAwareIOStream.h b/src/IO/S3/SessionAwareIOStream.h index 1640accb6fac..f7e42f99f51b 100644 --- a/src/IO/S3/SessionAwareIOStream.h +++ b/src/IO/S3/SessionAwareIOStream.h @@ -18,6 +18,10 @@ class SessionAwareIOStream : public std::iostream { } + Session & getSession() { return session; } + + const Session & getSession() const { return session; } + private: /// Poco HTTP session is holder of response stream. 
Session session; From c8cbc9f8ce36fa49a0785c7f9792c6cf154e06da Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Jun 2023 16:19:14 +0200 Subject: [PATCH 023/179] fix test --- tests/integration/test_merge_tree_s3/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 2ccd517923a4..22805eb6e948 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -923,7 +923,7 @@ def test_merge_canceled_by_s3_errors_when_move(cluster, broken_s3, node_name): @pytest.mark.parametrize("node_name", ["node"]) @pytest.mark.parametrize( - "in_flight_memory", [(10, 245918115), (5, 156786752), (1, 106426187)] + "in_flight_memory", [(10, 288044299), (5, 193557290), (1, 128348733)] ) def test_s3_engine_heavy_write_check_mem( cluster, broken_s3, node_name, in_flight_memory From a91fc3ddb33865d2db8170ff96e636de293b323a Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Wed, 14 Jun 2023 16:44:31 +0000 Subject: [PATCH 024/179] Add docs/ add more cases in test --- docs/en/interfaces/formats.md | 3 +- .../operations/settings/settings-formats.md | 5 +++ docs/ru/interfaces/formats.md | 4 +- docs/ru/operations/settings/settings.md | 8 +++- src/Core/Settings.h | 2 +- src/Formats/FormatFactory.cpp | 2 +- src/Formats/FormatSettings.h | 2 +- .../Formats/Impl/CSVRowInputFormat.cpp | 37 +++++++++---------- .../RowInputFormatWithNamesAndTypes.cpp | 4 ++ tests/queries/0_stateless/00301_csv.reference | 10 +++-- tests/queries/0_stateless/00301_csv.sh | 15 +++++--- 11 files changed, 56 insertions(+), 36 deletions(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 324930e248f9..950692deb77e 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -470,6 +470,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`. - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`. - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`. +- [input_format_csv_ignore_extra_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_ignore_extra_columns) - ignore extra colums in CSV input. Default value - `false`. 
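
For illustration, a rough sketch of how the new `input_format_csv_ignore_extra_columns` setting is meant to behave, modeled on the test added in this patch (the table name, columns and sample rows below are hypothetical): when the setting is enabled, trailing CSV fields beyond the expected columns are skipped instead of causing a parse error.

```bash
# Hypothetical table with two columns; input rows carry extra trailing fields.
clickhouse-client --query="CREATE TABLE csv_demo (s String, n UInt64) ENGINE = Memory"

echo 'Hello, 1, extra
World, 2, extra, more extra' | clickhouse-client \
    --input_format_csv_ignore_extra_columns=1 \
    --query="INSERT INTO csv_demo FORMAT CSV"

# Only the first two fields of each row are stored; the rest are ignored.
clickhouse-client --query="SELECT * FROM csv_demo ORDER BY n"
clickhouse-client --query="DROP TABLE csv_demo"
```
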
## CSVWithNames {#csvwithnames} @@ -2062,7 +2063,7 @@ Special format for reading Parquet file metadata (https://parquet.apache.org/doc - logical_type - column logical type - compression - compression used for this column - total_uncompressed_size - total uncompressed bytes size of the column, calculated as the sum of total_uncompressed_size of the column from all row groups - - total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups + - total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups - space_saved - percent of space saved by compression, calculated as (1 - total_compressed_size/total_uncompressed_size). - encodings - the list of encodings used for this column - row_groups - the list of row groups metadata with the next structure: diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 26501f3f3f63..e721c9408e39 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -931,6 +931,11 @@ Result ```text " string " ``` +### input_format_csv_ignore_extra_columns {#input_format_csv_ignore_extra_columns} + +Ignore extra colums in CSV input. + +Disabled by default. ## Values format settings {#values-format-settings} diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 48a6132170a3..8488f4ce55a1 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -401,8 +401,8 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR - [output_format_csv_crlf_end_of_line](../operations/settings/settings.md#output_format_csv_crlf_end_of_line) - если установлено значение true, конец строки в формате вывода CSV будет `\r\n` вместо `\n`. Значение по умолчанию - `false`. - [input_format_csv_skip_first_lines](../operations/settings/settings.md#input_format_csv_skip_first_lines) - пропустить указанное количество строк в начале данных. Значение по умолчанию - `0`. - [input_format_csv_detect_header](../operations/settings/settings.md#input_format_csv_detect_header) - обнаружить заголовок с именами и типами в формате CSV. Значение по умолчанию - `true`. -- [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - удалить пробелы и символы табуляции из строк без кавычек. -Значение по умолчанию - `true`. +- [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - удалить пробелы и символы табуляции из строк без кавычек. Значение по умолчанию - `true`. +- [input_format_csv_ignore_extra_columns](../operations/settings/settings.md/#input_format_csv_ignore_extra_columns) - игнорировать дополнительные столбцы. Значение по умолчанию - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index e3da8302fc8a..33d9300f8e1a 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1686,7 +1686,7 @@ SELECT * FROM table_with_enum_column_for_csv_insert; ## input_format_csv_detect_header {#input_format_csv_detect_header} Обнаружить заголовок с именами и типами в формате CSV. - + Значение по умолчанию - `true`. 
## input_format_csv_skip_first_lines {#input_format_csv_skip_first_lines} @@ -1727,6 +1727,12 @@ echo ' string ' | ./clickhouse local -q "select * from table FORMAT CSV" --in " string " ``` +## input_format_csv_ignore_extra_columns {#input_format_csv_ignore_extra_columns} + +Игнорировать дополнительные столбцы. + +Выключено по умолчанию. + ## output_format_tsv_crlf_end_of_line {#settings-output-format-tsv-crlf-end-of-line} Использовать в качестве разделителя строк для TSV формата CRLF (DOC/Windows стиль) вместо LF (Unix стиль). diff --git a/src/Core/Settings.h b/src/Core/Settings.h index d38f7767252d..9582419b98c9 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -835,7 +835,6 @@ class IColumn; M(Bool, input_format_import_nested_json, false, "Map nested JSON data to nested tables (it works for JSONEachRow format).", 0) \ M(Bool, input_format_defaults_for_omitted_fields, true, "For input data calculate default expressions for omitted fields (it works for JSONEachRow, -WithNames, -WithNamesAndTypes formats).", IMPORTANT) \ M(Bool, input_format_csv_empty_as_default, true, "Treat empty fields in CSV input as default values.", 0) \ - M(Bool, input_format_csv_ignore_extra_columns, false, "", 0) \ M(Bool, input_format_tsv_empty_as_default, false, "Treat empty fields in TSV input as default values.", 0) \ M(Bool, input_format_tsv_enum_as_number, false, "Treat inserted enum values in TSV formats as enum indices.", 0) \ M(Bool, input_format_null_as_default, true, "Initialize null fields with default values if the data type of this field is not nullable and it is supported by the input format", 0) \ @@ -1001,6 +1000,7 @@ class IColumn; M(Bool, regexp_dict_allow_hyperscan, true, "Allow regexp_tree dictionary using Hyperscan library.", 0) \ \ M(Bool, dictionary_use_async_executor, false, "Execute a pipeline for reading from a dictionary with several threads. It's supported only by DIRECT dictionary with CLICKHOUSE source.", 0) \ + M(Bool, input_format_csv_ignore_extra_columns, false, "Ignore extra colums in CSV input", 0) \ // End of FORMAT_FACTORY_SETTINGS // Please add settings non-related to formats into the COMMON_SETTINGS above. 
diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 0218d268c519..f29b55f7e736 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -63,7 +63,6 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.delimiter = settings.format_csv_delimiter; format_settings.csv.tuple_delimiter = settings.format_csv_delimiter; format_settings.csv.empty_as_default = settings.input_format_csv_empty_as_default; - format_settings.csv.ignore_extra_columns = settings.input_format_csv_ignore_extra_columns; format_settings.csv.enum_as_number = settings.input_format_csv_enum_as_number; format_settings.csv.null_representation = settings.format_csv_null_representation; format_settings.csv.arrays_as_nested_csv = settings.input_format_csv_arrays_as_nested_csv; @@ -72,6 +71,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.try_detect_header = settings.input_format_csv_detect_header; format_settings.csv.skip_trailing_empty_lines = settings.input_format_csv_skip_trailing_empty_lines; format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces; + format_settings.csv.ignore_extra_columns = settings.input_format_csv_ignore_extra_columns; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 3bc53140fe54..38148bda3733 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -128,7 +128,6 @@ struct FormatSettings bool allow_single_quotes = true; bool allow_double_quotes = true; bool empty_as_default = false; - bool ignore_extra_columns = false; bool crlf_end_of_line = false; bool enum_as_number = false; bool arrays_as_nested_csv = false; @@ -140,6 +139,7 @@ struct FormatSettings bool try_detect_header = true; bool skip_trailing_empty_lines = false; bool trim_whitespaces = true; + bool ignore_extra_columns = false; } csv; struct HiveText diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 0cc5889b732b..8aaf8fd3e2f8 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -288,6 +288,8 @@ bool CSVFormatReader::readField( const bool at_delimiter = !buf->eof() && *buf->position() == format_settings.csv.delimiter; const bool at_last_column_line_end = is_last_file_column && (buf->eof() || *buf->position() == '\n' || *buf->position() == '\r'); + bool res = false; + /// Note: Tuples are serialized in CSV as separate columns, but with empty_as_default or null_as_default /// only one empty or NULL column will be expected if (format_settings.csv.empty_as_default && (at_delimiter || at_last_column_line_end)) @@ -299,31 +301,28 @@ bool CSVFormatReader::readField( /// they do not contain empty unquoted fields, so this check /// works for tuples as well. column.insertDefault(); - return false; + } + else if (format_settings.null_as_default && !isNullableOrLowCardinalityNullable(type)) + { + /// If value is null but type is not nullable then use default value instead. 
+ res = SerializationNullable::deserializeTextCSVImpl(column, *buf, format_settings, serialization); + } + else + { + /// Read the column normally. + serialization->deserializeTextCSV(column, *buf, format_settings); + res = true; } - auto skip_all = [&]() + if (is_last_file_column && format_settings.csv.ignore_extra_columns) { - if (!is_last_file_column || !format_settings.csv.ignore_extra_columns) + while (checkChar(format_settings.csv.delimiter, *buf)) { - return; + skipField(); + skipWhitespacesAndTabs(*buf); } - //std::cout << "skip !!!" << std::endl; - buf->position() = find_first_symbols<'\n'>(buf->position(), buf->buffer().end()); - }; - if (format_settings.null_as_default && !isNullableOrLowCardinalityNullable(type)) - { - /// If value is null but type is not nullable then use default value instead. - bool res = SerializationNullable::deserializeTextCSVImpl(column, *buf, format_settings, serialization); - skip_all(); - return res; } - - /// Read the column normally. - serialization->deserializeTextCSV(column, *buf, format_settings); - - skip_all(); - return true; + return res; } void CSVFormatReader::skipPrefixBeforeHeader() diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp index eaedbbb4a1e5..24bf1d0d5955 100644 --- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp +++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp @@ -212,8 +212,12 @@ bool RowInputFormatWithNamesAndTypes::readRow(MutableColumns & columns, RowReadE format_reader->skipRowStartDelimiter(); ext.read_columns.resize(data_types.size()); + //std::cout << "col size " << column_mapping->column_indexes_for_input_fields.size() << std::endl; for (size_t file_column = 0; file_column < column_mapping->column_indexes_for_input_fields.size(); ++file_column) { + // std::cout << " file_column " << file_column << column_mapping->names_of_columns[file_column] << std::endl; + + const auto & column_index = column_mapping->column_indexes_for_input_fields[file_column]; const bool is_last_file_column = file_column + 1 == column_mapping->column_indexes_for_input_fields.size(); if (column_index) diff --git a/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference index 61279f3b84a5..3dbe3116beae 100644 --- a/tests/queries/0_stateless/00301_csv.reference +++ b/tests/queries/0_stateless/00301_csv.reference @@ -11,7 +11,9 @@ default-eof 1 2019-06-19 2016-01-01 01:02:03 NUL 2016-01-02 01:02:03 Nhello \N \N -Hello world 1 2016-01-01 -Hello world 2 2016-01-02 -Hello world 3 2016-01-03 -Hello world 4 2016-01-04 +Hello 1 String1 +Hello 2 String2 +Hello 3 String3 +Hello 4 String4 +Hello 5 String5 +Hello 6 String6 diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index e99c39a0f6f0..fafe75f6f63f 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -39,11 +39,14 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s NULLS LAST"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; -$CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64 DEFAULT 1, d Date DEFAULT '2019-06-19') ENGINE = Memory"; - -echo 'Hello world, 1, 2016-01-01 -Hello world, 2 ,2016-01-02, -Hello world, 3 ,2016-01-03, 2016-01-13 -Hello world, 4 ,2016-01-04, 2016-01-14, 2016-01-15' | $CLICKHOUSE_CLIENT --input_format_csv_empty_as_default=1 --input_format_csv_ignore_extra_columns=1 --query="INSERT INTO csv FORMAT CSV"; +$CLICKHOUSE_CLIENT 
--query="CREATE TABLE csv (s String, n UInt64 DEFAULT 3, d String DEFAULT 'String4') ENGINE = Memory"; + +echo 'Hello, 1, String1 +Hello, 2, String2, +Hello, 3, String3, 2016-01-13 +Hello, 4, , 2016-01-14 +Hello, 5, String5, 2016-01-15, 2016-01-16 +Hello, 6, String6, "line with a +break"' | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_ignore_extra_columns=1 --query="INSERT INTO csv FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s, n"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; \ No newline at end of file From 806176d88e0b4237c16e23aed27179ed93aa17c1 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 15 Jun 2023 11:23:08 +0000 Subject: [PATCH 025/179] Add input_format_csv_missing_as_default setting and tests --- docs/en/interfaces/formats.md | 3 ++- .../operations/settings/settings-formats.md | 8 +++++++- docs/ru/interfaces/formats.md | 3 ++- docs/ru/operations/settings/settings.md | 8 +++++++- src/Core/Settings.h | 3 ++- src/Dictionaries/CacheDictionary.cpp | 2 +- src/Formats/FormatFactory.cpp | 1 + src/Formats/FormatSettings.h | 1 + .../Formats/Impl/CSVRowInputFormat.cpp | 18 +++++++++++++++++- .../Formats/Impl/CSVRowInputFormat.h | 1 + .../RowInputFormatWithNamesAndTypes.cpp | 4 ---- tests/queries/0_stateless/00301_csv.reference | 10 ++++++++++ tests/queries/0_stateless/00301_csv.sh | 19 +++++++++++++++++-- 13 files changed, 68 insertions(+), 13 deletions(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 950692deb77e..e0b0fcfabd51 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -470,7 +470,8 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`. - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`. - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`. -- [input_format_csv_ignore_extra_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_ignore_extra_columns) - ignore extra colums in CSV input. Default value - `false`. +- [input_format_csv_ignore_extra_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_ignore_extra_columns) - ignore extra columns in CSV input (if your file has more columns than expected). Default value - `false`. +- [input_format_csv_missing_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_csv_missing_as_default) - treat missing fields in CSV input as default values. Default value - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index e721c9408e39..6d9a1fb51608 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -933,7 +933,13 @@ Result ``` ### input_format_csv_ignore_extra_columns {#input_format_csv_ignore_extra_columns} -Ignore extra colums in CSV input. 
+Ignore extra columns in CSV input (if your file has more columns than expected). + +Disabled by default. + +### input_format_csv_missing_as_default {#input_format_csv_missing_as_default} + +Treat missing fields in CSV input as default values. Disabled by default. diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 8488f4ce55a1..7e3bb3f7d266 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -402,7 +402,8 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR - [input_format_csv_skip_first_lines](../operations/settings/settings.md#input_format_csv_skip_first_lines) - пропустить указанное количество строк в начале данных. Значение по умолчанию - `0`. - [input_format_csv_detect_header](../operations/settings/settings.md#input_format_csv_detect_header) - обнаружить заголовок с именами и типами в формате CSV. Значение по умолчанию - `true`. - [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - удалить пробелы и символы табуляции из строк без кавычек. Значение по умолчанию - `true`. -- [input_format_csv_ignore_extra_columns](../operations/settings/settings.md/#input_format_csv_ignore_extra_columns) - игнорировать дополнительные столбцы. Значение по умолчанию - `false`. +- [input_format_csv_ignore_extra_columns](../operations/settings/settings.md/#input_format_csv_ignore_extra_columns) - игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается). Значение по умолчанию - `false`. +- [input_format_csv_missing_as_default](../operations/settings/settings.md/#input_format_csv_missing_as_default) - рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. Значение по умолчанию - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 33d9300f8e1a..61cfc332585a 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1729,7 +1729,13 @@ echo ' string ' | ./clickhouse local -q "select * from table FORMAT CSV" --in ## input_format_csv_ignore_extra_columns {#input_format_csv_ignore_extra_columns} -Игнорировать дополнительные столбцы. +Игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается). + +Выключено по умолчанию. + +## input_format_csv_missing_as_default {#input_format_csv_missing_as_default} + +Рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. Выключено по умолчанию. diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 9582419b98c9..ce7c28996e81 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1000,7 +1000,8 @@ class IColumn; M(Bool, regexp_dict_allow_hyperscan, true, "Allow regexp_tree dictionary using Hyperscan library.", 0) \ \ M(Bool, dictionary_use_async_executor, false, "Execute a pipeline for reading from a dictionary with several threads. It's supported only by DIRECT dictionary with CLICKHOUSE source.", 0) \ - M(Bool, input_format_csv_ignore_extra_columns, false, "Ignore extra colums in CSV input", 0) \ + M(Bool, input_format_csv_ignore_extra_columns, false, "Ignore extra columns in CSV input (if your file has more columns than expected)", 0) \ + M(Bool, input_format_csv_missing_as_default, false, "Treat missing fields in CSV input as default values", 0) \ // End of FORMAT_FACTORY_SETTINGS // Please add settings non-related to formats into the COMMON_SETTINGS above. 
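
For illustration, a rough sketch of the intended behavior of the new `input_format_csv_missing_as_default` setting, modeled on the tests added in this patch (the table name, columns and sample rows are hypothetical): when a row has fewer fields than the table has columns, the missing trailing fields are treated as default values instead of causing a parse error (here combined with `input_format_defaults_for_omitted_fields`, as in the companion test, so the declared DEFAULT expressions are used).

```bash
# Hypothetical table; the second and third columns have explicit defaults.
clickhouse-client --query="CREATE TABLE csv_demo (s String, n UInt64 DEFAULT 42, d String DEFAULT 'none') ENGINE = Memory"

echo 'Hello
Hello, 1
Hello, 1, world' | clickhouse-client \
    --input_format_csv_missing_as_default=1 \
    --input_format_defaults_for_omitted_fields=1 \
    --query="INSERT INTO csv_demo FORMAT CSV"

# Rows that omit trailing fields get the defaults (42 / 'none') filled in.
clickhouse-client --query="SELECT * FROM csv_demo ORDER BY n"
clickhouse-client --query="DROP TABLE csv_demo"
```
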
diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index c5c88a9f142a..359f7c174369 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -138,7 +138,7 @@ Columns CacheDictionary::getColumns( const Columns & default_values_columns) const { /** - * Flow of getColumsImpl + * Flow of getColumnsImpl * 1. Get fetch result from storage * 2. If all keys are found in storage and not expired * 2.1. If storage returns fetched columns in order of keys then result is returned to client. diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index f29b55f7e736..102b5d7eec06 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -72,6 +72,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.skip_trailing_empty_lines = settings.input_format_csv_skip_trailing_empty_lines; format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces; format_settings.csv.ignore_extra_columns = settings.input_format_csv_ignore_extra_columns; + format_settings.csv.missing_as_default = settings.input_format_csv_missing_as_default; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 38148bda3733..2b52d88184c3 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -140,6 +140,7 @@ struct FormatSettings bool skip_trailing_empty_lines = false; bool trim_whitespaces = true; bool ignore_extra_columns = false; + bool missing_as_default = false; } csv; struct HiveText diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 8aaf8fd3e2f8..dcc057baef2c 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -147,7 +147,18 @@ CSVFormatReader::CSVFormatReader(PeekableReadBuffer & buf_, const FormatSettings void CSVFormatReader::skipFieldDelimiter() { skipWhitespacesAndTabs(*buf); - assertChar(format_settings.csv.delimiter, *buf); + + bool res = checkChar(format_settings.csv.delimiter, *buf); + if (!res && !format_settings.csv.missing_as_default) + { + char err[2] = {format_settings.csv.delimiter, '\0'}; + throwAtAssertionFailed(err, *buf); + } + + if (!res && format_settings.csv.missing_as_default) + { + current_row_has_missing_fields = true; + } } template @@ -187,6 +198,7 @@ void CSVFormatReader::skipRowEndDelimiter() return; skipEndOfLine(*buf); + current_row_has_missing_fields = false; } void CSVFormatReader::skipHeaderRow() @@ -302,6 +314,10 @@ bool CSVFormatReader::readField( /// works for tuples as well. column.insertDefault(); } + else if (current_row_has_missing_fields) + { + column.insertDefault(); + } else if (format_settings.null_as_default && !isNullableOrLowCardinalityNullable(type)) { /// If value is null but type is not nullable then use default value instead. 
diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h index 0c8099a216c2..3958c66bbc63 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -89,6 +89,7 @@ class CSVFormatReader : public FormatWithNamesAndTypesReader protected: PeekableReadBuffer * buf; + bool current_row_has_missing_fields = false; }; class CSVSchemaReader : public FormatWithNamesAndTypesSchemaReader diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp index 24bf1d0d5955..eaedbbb4a1e5 100644 --- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp +++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp @@ -212,12 +212,8 @@ bool RowInputFormatWithNamesAndTypes::readRow(MutableColumns & columns, RowReadE format_reader->skipRowStartDelimiter(); ext.read_columns.resize(data_types.size()); - //std::cout << "col size " << column_mapping->column_indexes_for_input_fields.size() << std::endl; for (size_t file_column = 0; file_column < column_mapping->column_indexes_for_input_fields.size(); ++file_column) { - // std::cout << " file_column " << file_column << column_mapping->names_of_columns[file_column] << std::endl; - - const auto & column_index = column_mapping->column_indexes_for_input_fields[file_column]; const bool is_last_file_column = file_column + 1 == column_mapping->column_indexes_for_input_fields.size(); if (column_index) diff --git a/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference index 3dbe3116beae..fa85fd924e14 100644 --- a/tests/queries/0_stateless/00301_csv.reference +++ b/tests/queries/0_stateless/00301_csv.reference @@ -1,19 +1,29 @@ +=== Test input_format_csv_empty_as_default Hello, world 123 2016-01-01 Hello, "world" 456 2016-01-02 Hello "world" 789 2016-01-03 Hello\n world 100 2016-01-04 default 1 2019-06-19 default-eof 1 2019-06-19 +=== Test datetime 2016-01-01 01:02:03 1 2016-01-02 01:02:03 2 2017-08-15 13:15:01 3 1970-01-02 05:46:39 4 +=== Test nullable datetime 2016-01-01 01:02:03 NUL 2016-01-02 01:02:03 Nhello \N \N +=== Test input_format_csv_ignore_extra_columns Hello 1 String1 Hello 2 String2 Hello 3 String3 Hello 4 String4 Hello 5 String5 Hello 6 String6 +=== Test input_format_csv_missing_as_default +Hello 0 33 \N 55 Default +Hello 0 33 \N 55 Default +Hello 1 2 \N 55 Default +Hello 1 2 3 4 String +Hello 1 2 3 4 String diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index fafe75f6f63f..887a75b0dedf 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -4,6 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh +echo === Test input_format_csv_empty_as_default $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS csv"; $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64 DEFAULT 1, d Date DEFAULT '2019-06-19') ENGINE = Memory"; @@ -18,6 +19,7 @@ Hello "world", 789 ,2016-01-03 $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY d, s"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; +echo === Test datetime $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (t DateTime('Asia/Istanbul'), s String) ENGINE = Memory"; echo '"2016-01-01 01:02:03","1" @@ -28,7 +30,7 @@ echo '"2016-01-01 01:02:03","1" $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; - +echo === Test nullable datetime $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (t Nullable(DateTime('Asia/Istanbul')), s Nullable(String)) ENGINE = Memory"; echo 'NULL, NULL @@ -39,6 +41,7 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s NULLS LAST"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; +echo === Test input_format_csv_ignore_extra_columns $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64 DEFAULT 3, d String DEFAULT 'String4') ENGINE = Memory"; echo 'Hello, 1, String1 @@ -49,4 +52,16 @@ Hello, 5, String5, 2016-01-15, 2016-01-16 Hello, 6, String6, "line with a break"' | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_ignore_extra_columns=1 --query="INSERT INTO csv FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s, n"; -$CLICKHOUSE_CLIENT --query="DROP TABLE csv"; \ No newline at end of file +$CLICKHOUSE_CLIENT --query="DROP TABLE csv"; + + +echo === Test input_format_csv_missing_as_default +$CLICKHOUSE_CLIENT --query="CREATE TABLE csv (f1 String, f2 UInt64, f3 UInt64 Default 33, f4 Nullable(UInt64), f5 Nullable(UInt64) Default 55, f6 String DEFAULT 'Default') ENGINE = Memory"; + +echo 'Hello +Hello, +Hello, 1, 2 +Hello, 1, 2, 3, 4, String +Hello, 1, 2, 3, 4, String,'| $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_missing_as_default=1 --query="INSERT INTO csv FORMAT CSV"; +$CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY f1, f2, f3, f4 NULLS FIRST, f5, f6"; +$CLICKHOUSE_CLIENT --query="DROP TABLE csv"; From 0eeee11dc46d462412ad671a7d59006fba59c403 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 15 Jun 2023 12:36:18 +0000 Subject: [PATCH 026/179] Style fix, add comment --- .../Formats/Impl/CSVRowInputFormat.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index dcc057baef2c..7cd812bc5b06 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -149,15 +149,17 @@ void CSVFormatReader::skipFieldDelimiter() skipWhitespacesAndTabs(*buf); bool res = checkChar(format_settings.csv.delimiter, *buf); - if (!res && !format_settings.csv.missing_as_default) + if (!res) { - char err[2] = {format_settings.csv.delimiter, '\0'}; - throwAtAssertionFailed(err, *buf); - } - - if (!res && format_settings.csv.missing_as_default) - { - current_row_has_missing_fields = true; + if (!format_settings.csv.missing_as_default) + { + char err[2] = {format_settings.csv.delimiter, '\0'}; + throwAtAssertionFailed(err, *buf); + } + else + { + current_row_has_missing_fields = true; + } } } @@ 
-332,6 +334,7 @@ bool CSVFormatReader::readField( if (is_last_file_column && format_settings.csv.ignore_extra_columns) { + // Skip all fields to next line. while (checkChar(format_settings.csv.delimiter, *buf)) { skipField(); From b546d8e665b86429ac44770db7d73dd32b0a7156 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 16 Jun 2023 15:30:56 +0200 Subject: [PATCH 027/179] review fixes + test --- src/Common/ProfileEvents.cpp | 2 ++ src/Disks/IO/ReadBufferFromRemoteFSGather.h | 2 +- src/IO/ReadBufferFromS3.cpp | 34 +++++++++++++------ src/IO/ReadBufferFromS3.h | 2 ++ src/IO/S3/PocoHTTPClient.cpp | 1 + ...ing_from_s3_with_connection_pool.reference | 1 + ...89_reading_from_s3_with_connection_pool.sh | 29 ++++++++++++++++ 7 files changed, 60 insertions(+), 11 deletions(-) create mode 100644 tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference create mode 100755 tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index f66f7bc64655..c9030070bf2f 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -368,6 +368,8 @@ The server successfully detected this situation and will download merged part fr M(ReadBufferFromS3InitMicroseconds, "Time spent initializing connection to S3.") \ M(ReadBufferFromS3Bytes, "Bytes read from S3.") \ M(ReadBufferFromS3RequestsErrors, "Number of exceptions while reading from S3.") \ + M(ReadBufferFromS3ResetSessions, "Number of HTTP sessions that were reset in ReadBufferFromS3.") \ + M(ReadBufferFromS3PreservedSessions, "Number of HTTP sessions that were preserved in ReadBufferFromS3.") \ \ M(WriteBufferFromS3Microseconds, "Time spent on writing to S3.") \ M(WriteBufferFromS3Bytes, "Bytes written to S3.") \ diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index 9bf55ab69ce9..6488d532829e 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -73,7 +73,7 @@ friend class ReadIndirectBufferFromRemoteFS; const std::shared_ptr cache_log; const String query_id; const bool use_external_buffer; - bool with_cache; + const bool with_cache; size_t read_until_position = 0; size_t file_offset_of_buffer_end = 0; diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index 364253ba7466..0b320ed86ff5 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -1,5 +1,7 @@ -#include "config.h" #include +#include +#include +#include "config.h" #if USE_AWS_S3 @@ -24,6 +26,8 @@ namespace ProfileEvents extern const Event ReadBufferFromS3InitMicroseconds; extern const Event ReadBufferFromS3Bytes; extern const Event ReadBufferFromS3RequestsErrors; + extern const Event ReadBufferFromS3ResetSessions; + extern const Event ReadBufferFromS3PreservedSessions; extern const Event ReadBufferSeekCancelConnection; extern const Event S3GetObject; extern const Event DiskS3GetObject; @@ -46,6 +50,19 @@ void resetSession(Aws::S3::Model::GetObjectResult & read_result) throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Session of unexpected type encountered"); } } + +void resetSessionIfNeeded(bool read_all_range_successfully, std::optional & read_result) +{ + if (!read_all_range_successfully && read_result) + { + /// When we abandon a session with an ongoing GetObject request and there is another one trying to delete the same object this delete + /// operation will hang until GetObject's session idle timeouts. 
So we have to call `reset()` on GetObject's session session immediately. + resetSession(*read_result); + ProfileEvents::increment(ProfileEvents::ReadBufferFromS3ResetSessions); + } + else + ProfileEvents::increment(ProfileEvents::ReadBufferFromS3PreservedSessions); +} } namespace DB @@ -91,10 +108,7 @@ bool ReadBufferFromS3::nextImpl() if (read_until_position) { if (read_until_position == offset) - { - read_all_range_successfully = true; return false; - } if (read_until_position < offset) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read beyond right offset ({} > {})", offset, read_until_position - 1); @@ -384,10 +398,7 @@ ReadBufferFromS3::~ReadBufferFromS3() { try { - if (!read_all_range_successfully && read_result) - /// When we abandon a session with an ongoing GetObject request and there is another one trying to delete the same object this delete - /// operation will hang until GetObject's session idle timeouts. So we have to call `reset()` on GetObject's session session immediately. - resetSession(*read_result); + resetSessionIfNeeded(readAllRangeSuccessfully(), read_result); } catch (...) { @@ -397,8 +408,7 @@ ReadBufferFromS3::~ReadBufferFromS3() std::unique_ptr ReadBufferFromS3::initialize() { - if (!read_all_range_successfully && read_result) - resetSession(*read_result); + resetSessionIfNeeded(readAllRangeSuccessfully(), read_result); read_all_range_successfully = false; /** @@ -463,6 +473,10 @@ Aws::S3::Model::GetObjectResult ReadBufferFromS3::sendRequest(size_t range_begin } } +bool ReadBufferFromS3::readAllRangeSuccessfully() const +{ + return read_until_position ? offset == read_until_position : read_all_range_successfully; +} } #endif diff --git a/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h index 11299aa2c2a8..d58971bea5b0 100644 --- a/src/IO/ReadBufferFromS3.h +++ b/src/IO/ReadBufferFromS3.h @@ -95,6 +95,8 @@ class ReadBufferFromS3 : public ReadBufferFromFileBase Aws::S3::Model::GetObjectResult sendRequest(size_t range_begin, std::optional range_end_incl) const; + bool readAllRangeSuccessfully() const; + ReadSettings read_settings; bool use_external_buffer; diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index 754b1bfd5b80..d64ddf0ec385 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -261,6 +261,7 @@ void PocoHTTPClient::makeRequestInternal( Aws::Utils::RateLimits::RateLimiterInterface * readLimiter , Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const { + /// Most sessions in pool are already connected and it is not possible to set proxy host/port to a connected session. 
const auto request_configuration = per_request_configuration(request); if (http_connection_pool_size && request_configuration.proxy_host.empty()) makeRequestInternalImpl(request, request_configuration, response, readLimiter, writeLimiter); diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference new file mode 100644 index 000000000000..d00491fd7e5b --- /dev/null +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh new file mode 100755 index 000000000000..7a8b94a10a89 --- /dev/null +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-random-settings + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -nm --query " +DROP TABLE IF EXISTS test_s3; + +CREATE TABLE test_s3 (a UInt64, b UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS disk = 's3_disk', min_bytes_for_wide_part = 0; + +INSERT INTO test_s3 SELECT number, number FROM numbers_mt(1e7); +" +query="SELECT a, b FROM test_s3" +query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1) +${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" +${CLICKHOUSE_CLIENT} -nm --query " +WITH + ProfileEvents['ReadBufferFromS3ResetSessions'] AS reset, + ProfileEvents['ReadBufferFromS3PreservedSessions'] AS preserved +SELECT preserved > reset +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query_id='$query_id'; +" From dd43a186adca8b4480e9287127c740f6b05d743d Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Mon, 19 Jun 2023 09:51:29 +0000 Subject: [PATCH 028/179] Minor edit docs / add int256 test --- docs/en/interfaces/formats.md | 2 +- docs/en/operations/settings/settings-formats.md | 2 +- src/Core/Settings.h | 2 +- src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 2 +- tests/queries/0_stateless/00301_csv.reference | 10 +++++----- tests/queries/0_stateless/00301_csv.sh | 10 +++++----- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 7a900ecd8696..c5cd19f1743e 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -471,7 +471,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`. - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`. - [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`. 
-- [input_format_csv_ignore_extra_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_ignore_extra_columns) - ignore extra columns in CSV input (if your file has more columns than expected). Default value - `false`. +- [input_format_csv_ignore_extra_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_ignore_extra_columns) - ignore extra columns in CSV input (if file has more columns than expected). Default value - `false`. - [input_format_csv_missing_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_csv_missing_as_default) - treat missing fields in CSV input as default values. Default value - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index c17a24abccf3..6b05f41666cc 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -933,7 +933,7 @@ Result ``` ### input_format_csv_ignore_extra_columns {#input_format_csv_ignore_extra_columns} -Ignore extra columns in CSV input (if your file has more columns than expected). +Ignore extra columns in CSV input (if file has more columns than expected). Disabled by default. diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 76bb7ae92062..e60d2df933f6 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1005,7 +1005,7 @@ class IColumn; M(Bool, regexp_dict_allow_hyperscan, true, "Allow regexp_tree dictionary using Hyperscan library.", 0) \ \ M(Bool, dictionary_use_async_executor, false, "Execute a pipeline for reading from a dictionary with several threads. It's supported only by DIRECT dictionary with CLICKHOUSE source.", 0) \ - M(Bool, input_format_csv_ignore_extra_columns, false, "Ignore extra columns in CSV input (if your file has more columns than expected)", 0) \ + M(Bool, input_format_csv_ignore_extra_columns, false, "Ignore extra columns in CSV input (if file has more columns than expected)", 0) \ M(Bool, input_format_csv_missing_as_default, false, "Treat missing fields in CSV input as default values", 0) \ // End of FORMAT_FACTORY_SETTINGS diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index c80887bde0aa..a727a5bc4902 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -346,7 +346,7 @@ bool CSVFormatReader::readField( while (checkChar(format_settings.csv.delimiter, *buf)) { skipField(); - skipWhitespacesAndTabs(*buf); + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); } } return res; diff --git a/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference index fa85fd924e14..e3441a3a4b3d 100644 --- a/tests/queries/0_stateless/00301_csv.reference +++ b/tests/queries/0_stateless/00301_csv.reference @@ -22,8 +22,8 @@ Hello 4 String4 Hello 5 String5 Hello 6 String6 === Test input_format_csv_missing_as_default -Hello 0 33 \N 55 Default -Hello 0 33 \N 55 Default -Hello 1 2 \N 55 Default -Hello 1 2 3 4 String -Hello 1 2 3 4 String +Hello 0 0 33 \N 55 Default +Hello 0 0 33 \N 55 Default +Hello 1 3 2 \N 55 Default +Hello 1 4 2 3 4 String +Hello 1 5 2 3 4 String diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index 887a75b0dedf..4555e0476d86 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -56,12 
+56,12 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; echo === Test input_format_csv_missing_as_default -$CLICKHOUSE_CLIENT --query="CREATE TABLE csv (f1 String, f2 UInt64, f3 UInt64 Default 33, f4 Nullable(UInt64), f5 Nullable(UInt64) Default 55, f6 String DEFAULT 'Default') ENGINE = Memory"; +$CLICKHOUSE_CLIENT --query="CREATE TABLE csv (f1 String, f2 UInt64, f3 UInt256, f4 UInt64 Default 33, f5 Nullable(UInt64), f6 Nullable(UInt64) Default 55, f7 String DEFAULT 'Default') ENGINE = Memory"; echo 'Hello Hello, -Hello, 1, 2 -Hello, 1, 2, 3, 4, String -Hello, 1, 2, 3, 4, String,'| $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_missing_as_default=1 --query="INSERT INTO csv FORMAT CSV"; -$CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY f1, f2, f3, f4 NULLS FIRST, f5, f6"; +Hello, 1, 3, 2 +Hello, 1, 4, 2, 3, 4, String +Hello, 1, 5, 2, 3, 4, String,'| $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_missing_as_default=1 --query="INSERT INTO csv FORMAT CSV"; +$CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY f1, f2, f3, f4, f5 NULLS FIRST, f6, f7"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; From f81401db99e194c4a8d231a38918da53bd221e90 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Mon, 19 Jun 2023 10:48:38 +0000 Subject: [PATCH 029/179] Add empty line test --- src/Processors/Formats/Impl/CSVRowInputFormat.h | 2 ++ tests/queries/0_stateless/00301_csv.reference | 2 ++ tests/queries/0_stateless/00301_csv.sh | 4 +++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h index 3958c66bbc63..82e03c453e78 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -89,6 +89,8 @@ class CSVFormatReader : public FormatWithNamesAndTypesReader protected: PeekableReadBuffer * buf; + +private: bool current_row_has_missing_fields = false; }; diff --git a/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference index e3441a3a4b3d..140bbda84e7b 100644 --- a/tests/queries/0_stateless/00301_csv.reference +++ b/tests/queries/0_stateless/00301_csv.reference @@ -22,6 +22,8 @@ Hello 4 String4 Hello 5 String5 Hello 6 String6 === Test input_format_csv_missing_as_default + 0 0 33 \N 55 Default + 0 0 33 \N 55 Default Hello 0 0 33 \N 55 Default Hello 0 0 33 \N 55 Default Hello 1 3 2 \N 55 Default diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index 4555e0476d86..aa019147babf 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -58,7 +58,9 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; echo === Test input_format_csv_missing_as_default $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (f1 String, f2 UInt64, f3 UInt256, f4 UInt64 Default 33, f5 Nullable(UInt64), f6 Nullable(UInt64) Default 55, f7 String DEFAULT 'Default') ENGINE = Memory"; -echo 'Hello +echo ' +, +Hello Hello, Hello, 1, 3, 2 Hello, 1, 4, 2, 3, 4, String From 876d5ae0a71dff7724bd665076a2d681a651829e Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 20 Jun 2023 20:27:43 +0200 Subject: [PATCH 030/179] fix ReadBufferFromS3 --- src/IO/ReadBufferFromS3.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index 0b320ed86ff5..fdbe1a4ba57c 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ 
b/src/IO/ReadBufferFromS3.cpp @@ -1,6 +1,4 @@ #include -#include -#include #include "config.h" #if USE_AWS_S3 From b20ecc2060ac74945e897de688daa1eaf032dda0 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Wed, 21 Jun 2023 06:25:11 +0000 Subject: [PATCH 031/179] Add line without spaces --- tests/queries/0_stateless/00301_csv.reference | 1 + tests/queries/0_stateless/00301_csv.sh | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference index 140bbda84e7b..804ccf0c713a 100644 --- a/tests/queries/0_stateless/00301_csv.reference +++ b/tests/queries/0_stateless/00301_csv.reference @@ -28,4 +28,5 @@ Hello 0 0 33 \N 55 Default Hello 0 0 33 \N 55 Default Hello 1 3 2 \N 55 Default Hello 1 4 2 3 4 String +Hello 1 4 2 3 4 String Hello 1 5 2 3 4 String diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index aa019147babf..c598be44261a 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -60,10 +60,11 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (f1 String, f2 UInt64, f3 UInt256, echo ' , -Hello -Hello, -Hello, 1, 3, 2 -Hello, 1, 4, 2, 3, 4, String -Hello, 1, 5, 2, 3, 4, String,'| $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_missing_as_default=1 --query="INSERT INTO csv FORMAT CSV"; +"Hello" +"Hello", +"Hello", 1, 3, 2 +"Hello",1,4,2,3,4,"String" +"Hello", 1, 4, 2, 3, 4, "String" +"Hello", 1, 5, 2, 3, 4, "String",'| $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_missing_as_default=1 --query="INSERT INTO csv FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY f1, f2, f3, f4, f5 NULLS FIRST, f6, f7"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; From 8bd53cad7849816a6bd6591eddebd9ba19fa7272 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Wed, 21 Jun 2023 14:01:05 +0000 Subject: [PATCH 032/179] Add quotes to test --- tests/queries/0_stateless/00301_csv.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index c598be44261a..dc354433af90 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -44,12 +44,12 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; echo === Test input_format_csv_ignore_extra_columns $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64 DEFAULT 3, d String DEFAULT 'String4') ENGINE = Memory"; -echo 'Hello, 1, String1 -Hello, 2, String2, -Hello, 3, String3, 2016-01-13 -Hello, 4, , 2016-01-14 -Hello, 5, String5, 2016-01-15, 2016-01-16 -Hello, 6, String6, "line with a +echo '"Hello", 1, "String1" +"Hello", 2, "String2", +"Hello", 3, "String3", "2016-01-13" +"Hello", 4, , "2016-01-14" +"Hello", 5, "String5", "2016-01-15", "2016-01-16" +"Hello", 6, "String6", "line with a break"' | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_ignore_extra_columns=1 --query="INSERT INTO csv FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s, n"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; From 4a570a05c9714c8ee94e68e4dda58efa759c8780 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 19 Jun 2023 23:35:35 +0000 Subject: [PATCH 033/179] Decrease default timeouts for S3 and HTTP requests --- docs/en/operations/settings/settings.md | 4 +- 
src/Backups/BackupIO_S3.cpp | 1 + src/Coordination/KeeperSnapshotManagerS3.cpp | 6 +-- src/Core/Defines.h | 2 +- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.h | 2 + .../ObjectStorages/S3/S3ObjectStorage.cpp | 48 ++++++++++--------- src/Disks/ObjectStorages/S3/S3ObjectStorage.h | 16 +++++-- src/Disks/ObjectStorages/S3/diskSettings.cpp | 2 +- src/IO/S3/Client.cpp | 33 +++++++++---- src/IO/S3/Client.h | 22 +++++++-- src/IO/S3/tests/gtest_aws_s3_client.cpp | 1 + src/IO/WriteBufferFromS3.cpp | 4 +- src/IO/WriteBufferFromS3.h | 3 ++ src/IO/tests/gtest_writebuffer_s3.cpp | 1 + src/Storages/StorageS3.cpp | 5 +- src/Storages/StorageS3.h | 1 + src/Storages/StorageS3Settings.cpp | 5 +- src/Storages/StorageS3Settings.h | 3 +- 19 files changed, 109 insertions(+), 51 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index baefbb2cf6fc..4916dfaaf7de 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -3501,7 +3501,7 @@ Possible values: - Any positive integer. - 0 - Disabled (infinite timeout). -Default value: 180. +Default value: 30. ## http_receive_timeout {#http_receive_timeout} @@ -3512,7 +3512,7 @@ Possible values: - Any positive integer. - 0 - Disabled (infinite timeout). -Default value: 180. +Default value: 30. ## check_query_single_value_result {#check_query_single_value_result} diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp index 967beba4bf52..9a2a457e13e9 100644 --- a/src/Backups/BackupIO_S3.cpp +++ b/src/Backups/BackupIO_S3.cpp @@ -253,6 +253,7 @@ std::unique_ptr BackupWriterS3::writeFile(const String & file_name) { return std::make_unique( client, + client, // already has long timeout s3_uri.bucket, fs::path(s3_uri.key) / file_name, DBMS_DEFAULT_BUFFER_SIZE, diff --git a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp index 1afe0b352c53..bf437f03ae32 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.cpp +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -144,14 +144,14 @@ void KeeperSnapshotManagerS3::uploadSnapshotImpl(const std::string & snapshot_pa const auto create_writer = [&](const auto & key) { - return WriteBufferFromS3 - { + return WriteBufferFromS3( + s3_client->client, s3_client->client, s3_client->uri.bucket, key, DBMS_DEFAULT_BUFFER_SIZE, request_settings_1 - }; + ); }; LOG_INFO(log, "Will try to upload snapshot on {} to S3", snapshot_path); diff --git a/src/Core/Defines.h b/src/Core/Defines.h index e9b84b71cae1..efe14b93a3df 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -41,7 +41,7 @@ /// The boundary on which the blocks for asynchronous file operations should be aligned. #define DEFAULT_AIO_FILE_BLOCK_SIZE 4096 -#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 180 +#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 30 #define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1 /// Maximum number of http-connections between two endpoints /// the number is unmotivated diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 204a27483df9..5162e0f273ef 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -102,6 +102,7 @@ class IColumn; M(Bool, s3_allow_parallel_part_upload, true, "Use multiple threads for s3 multipart upload. 
It may lead to slightly higher memory usage", 0) \ M(Bool, s3_throw_on_zero_files_match, false, "Throw an error, when ListObjects request cannot match any files", 0) \ M(UInt64, s3_retry_attempts, 10, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries", 0) \ + M(UInt64, s3_request_timeout_ms, 3000, "Idleness timeout for sending and receiving data to/from S3. Fail if a single TCP read or write call blocks for this long.", 0) \ M(Bool, enable_s3_requests_logging, false, "Enable very explicit logging of S3 requests. Makes sense for debug only.", 0) \ M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \ M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables", 0) \ diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index 9fd45ac16d60..2886cdd288d0 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -80,6 +80,8 @@ namespace SettingsChangesHistory /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972) static std::map settings_changes_history = { + {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, + {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index e48924326e14..e46ca3d08282 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -149,7 +149,7 @@ class S3IteratorAsync final : public IObjectStorageIteratorAsync bool S3ObjectStorage::exists(const StoredObject & object) const { auto settings_ptr = s3_settings.get(); - return S3::objectExists(*client.get(), bucket, object.remote_path, {}, settings_ptr->request_settings, /* for_disk_s3= */ true); + return S3::objectExists(*clients.get()->client, bucket, object.remote_path, {}, settings_ptr->request_settings, /* for_disk_s3= */ true); } std::unique_ptr S3ObjectStorage::readObjects( /// NOLINT @@ -168,7 +168,7 @@ std::unique_ptr S3ObjectStorage::readObjects( /// NOLINT (const std::string & path, size_t read_until_position) -> std::unique_ptr { return std::make_unique( - client.get(), + clients.get()->client, bucket, path, version_id, @@ -218,7 +218,7 @@ std::unique_ptr S3ObjectStorage::readObject( /// NOLINT { auto settings_ptr = s3_settings.get(); return std::make_unique( - client.get(), + clients.get()->client, bucket, object.remote_path, version_id, @@ -243,8 +243,10 @@ std::unique_ptr S3ObjectStorage::writeObject( /// NOLIN if (write_settings.s3_allow_parallel_part_upload) scheduler = threadPoolCallbackRunner(getThreadPoolWriter(), "VFSWrite"); + auto clients_ = clients.get(); return std::make_unique( - client.get(), + clients_->client, + clients_->client_with_long_timeout, bucket, object.remote_path, buf_size, @@ -258,7 +260,7 @@ std::unique_ptr S3ObjectStorage::writeObject( /// NOLIN ObjectStorageIteratorPtr S3ObjectStorage::iterate(const std::string & path_prefix) const { auto settings_ptr = s3_settings.get(); - auto client_ptr = client.get(); + auto client_ptr = clients.get()->client; return std::make_shared(bucket, path_prefix, client_ptr, settings_ptr->list_object_keys_size); } @@ -266,7 +268,7 @@ ObjectStorageIteratorPtr S3ObjectStorage::iterate(const std::string & path_prefi void S3ObjectStorage::listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const { auto settings_ptr = s3_settings.get(); - auto client_ptr = client.get(); + auto client_ptr = clients.get()->client; S3::ListObjectsV2Request request; request.SetBucket(bucket); @@ -307,7 +309,7 @@ void S3ObjectStorage::listObjects(const std::string & path, RelativePathsWithMet void S3ObjectStorage::removeObjectImpl(const StoredObject & object, bool if_exists) { - auto client_ptr = client.get(); + auto client_ptr = clients.get()->client; ProfileEvents::increment(ProfileEvents::S3DeleteObjects); ProfileEvents::increment(ProfileEvents::DiskS3DeleteObjects); @@ -333,7 +335,7 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e } else { - auto client_ptr = client.get(); + auto client_ptr = clients.get()->client; auto settings_ptr = s3_settings.get(); size_t chunk_size_limit = settings_ptr->objects_chunk_size_to_delete; @@ -394,7 +396,7 @@ void S3ObjectStorage::removeObjectsIfExist(const StoredObjects & objects) std::optional S3ObjectStorage::tryGetObjectMetadata(const std::string & path) const { auto settings_ptr = s3_settings.get(); - auto object_info = S3::getObjectInfo(*client.get(), bucket, path, {}, settings_ptr->request_settings, /* with_metadata= */ true, /* for_disk_s3= */ true, /* throw_on_error= */ false); + 
auto object_info = S3::getObjectInfo(*clients.get()->client, bucket, path, {}, settings_ptr->request_settings, /* with_metadata= */ true, /* for_disk_s3= */ true, /* throw_on_error= */ false); if (object_info.size == 0 && object_info.last_modification_time == 0 && object_info.metadata.empty()) return {}; @@ -410,7 +412,7 @@ std::optional S3ObjectStorage::tryGetObjectMetadata(const std::s ObjectMetadata S3ObjectStorage::getObjectMetadata(const std::string & path) const { auto settings_ptr = s3_settings.get(); - auto object_info = S3::getObjectInfo(*client.get(), bucket, path, {}, settings_ptr->request_settings, /* with_metadata= */ true, /* for_disk_s3= */ true); + auto object_info = S3::getObjectInfo(*clients.get()->client, bucket, path, {}, settings_ptr->request_settings, /* with_metadata= */ true, /* for_disk_s3= */ true); ObjectMetadata result; result.size_bytes = object_info.size; @@ -429,7 +431,7 @@ void S3ObjectStorage::copyObjectToAnotherObjectStorage( // NOLINT /// Shortcut for S3 if (auto * dest_s3 = dynamic_cast(&object_storage_to); dest_s3 != nullptr) { - auto client_ptr = client.get(); + auto client_ptr = clients.get()->client; auto settings_ptr = s3_settings.get(); auto size = S3::getObjectSize(*client_ptr, bucket, object_from.remote_path, {}, settings_ptr->request_settings, /* for_disk_s3= */ true); auto scheduler = threadPoolCallbackRunner(getThreadPoolWriter(), "S3ObjStor_copy"); @@ -445,7 +447,7 @@ void S3ObjectStorage::copyObjectToAnotherObjectStorage( // NOLINT void S3ObjectStorage::copyObject( // NOLINT const StoredObject & object_from, const StoredObject & object_to, std::optional object_to_attributes) { - auto client_ptr = client.get(); + auto client_ptr = clients.get()->client; auto settings_ptr = s3_settings.get(); auto size = S3::getObjectSize(*client_ptr, bucket, object_from.remote_path, {}, settings_ptr->request_settings, /* for_disk_s3= */ true); auto scheduler = threadPoolCallbackRunner(getThreadPoolWriter(), "S3ObjStor_copy"); @@ -458,35 +460,33 @@ void S3ObjectStorage::setNewSettings(std::unique_ptr && s3_settings.set(std::move(s3_settings_)); } -void S3ObjectStorage::setNewClient(std::unique_ptr && client_) -{ - client.set(std::move(client_)); -} - void S3ObjectStorage::shutdown() { - auto client_ptr = client.get(); + auto clients_ptr = clients.get(); /// This call stops any next retry attempts for ongoing S3 requests. /// If S3 request is failed and the method below is executed S3 client immediately returns the last failed S3 request outcome. /// If S3 is healthy nothing wrong will be happened and S3 requests will be processed in a regular way without errors. /// This should significantly speed up shutdown process if S3 is unhealthy. - const_cast(*client_ptr).DisableRequestProcessing(); + const_cast(*clients_ptr->client).DisableRequestProcessing(); + const_cast(*clients_ptr->client_with_long_timeout).DisableRequestProcessing(); } void S3ObjectStorage::startup() { - auto client_ptr = client.get(); + auto clients_ptr = clients.get(); /// Need to be enabled if it was disabled during shutdown() call. 
- const_cast(*client_ptr).EnableRequestProcessing(); + const_cast(*clients_ptr->client).EnableRequestProcessing(); + const_cast(*clients_ptr->client_with_long_timeout).EnableRequestProcessing(); } void S3ObjectStorage::applyNewSettings(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context) { auto new_s3_settings = getSettings(config, config_prefix, context); auto new_client = getClient(config, config_prefix, context, *new_s3_settings); + auto new_clients = std::make_unique(std::move(new_client), *new_s3_settings); s3_settings.set(std::move(new_s3_settings)); - client.set(std::move(new_client)); + clients.set(std::move(new_clients)); } std::unique_ptr S3ObjectStorage::cloneObjectStorage( @@ -501,7 +501,9 @@ std::unique_ptr S3ObjectStorage::cloneObjectStorage( endpoint); } -} +S3ObjectStorage::Clients::Clients(std::shared_ptr client_, const S3ObjectStorageSettings & settings) + : client(std::move(client_)), client_with_long_timeout(client->clone(std::nullopt, settings.request_settings.long_request_timeout_ms)) {} +} #endif diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index 072e1354d382..527b1479d89f 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -39,6 +39,16 @@ struct S3ObjectStorageSettings class S3ObjectStorage : public IObjectStorage { +public: + struct Clients + { + std::shared_ptr client; + std::shared_ptr client_with_long_timeout; + + Clients() = default; + Clients(std::shared_ptr client, const S3ObjectStorageSettings & settings); + }; + private: friend class S3PlainObjectStorage; @@ -51,7 +61,7 @@ class S3ObjectStorage : public IObjectStorage String bucket_, String connection_string) : bucket(bucket_) - , client(std::move(client_)) + , clients(std::make_unique(std::move(client_), *s3_settings_)) , s3_settings(std::move(s3_settings_)) , s3_capabilities(s3_capabilities_) , version_id(std::move(version_id_)) @@ -159,14 +169,12 @@ class S3ObjectStorage : public IObjectStorage private: void setNewSettings(std::unique_ptr && s3_settings_); - void setNewClient(std::unique_ptr && client_); - void removeObjectImpl(const StoredObject & object, bool if_exists); void removeObjectsImpl(const StoredObjects & objects, bool if_exists); std::string bucket; - MultiVersion client; + MultiVersion clients; MultiVersion s3_settings; S3Capabilities s3_capabilities; diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index 409eb2a3dc3f..cbf0392aae9c 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -129,7 +129,7 @@ std::unique_ptr getClient( throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 path must ends with '/', but '{}' doesn't.", uri.key); client_configuration.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 1000); - client_configuration.requestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 30000); + client_configuration.requestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 3000); client_configuration.maxConnections = config.getUInt(config_prefix + ".max_connections", 100); client_configuration.endpointOverride = uri.endpoint; diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 668b1a3959d5..7e20b1a9e8fa 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -100,7 +100,7 @@ std::unique_ptr Client::create( size_t max_redirects_, 
ServerSideEncryptionKMSConfig sse_kms_config_, const std::shared_ptr & credentials_provider, - const Aws::Client::ClientConfiguration & client_configuration, + const PocoHTTPClientConfiguration & client_configuration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads, bool use_virtual_addressing) { @@ -109,9 +109,16 @@ std::unique_ptr Client::create( new Client(max_redirects_, std::move(sse_kms_config_), credentials_provider, client_configuration, sign_payloads, use_virtual_addressing)); } -std::unique_ptr Client::create(const Client & other) +std::unique_ptr Client::clone( + std::optional> override_retry_strategy, + std::optional override_request_timeout_ms) const { - return std::unique_ptr(new Client(other)); + PocoHTTPClientConfiguration new_configuration = client_configuration; + if (override_retry_strategy.has_value()) + new_configuration.retryStrategy = *override_retry_strategy; + if (override_request_timeout_ms.has_value()) + new_configuration.requestTimeoutMs = *override_request_timeout_ms; + return std::unique_ptr(new Client(*this, new_configuration)); } namespace @@ -134,11 +141,14 @@ Client::Client( size_t max_redirects_, ServerSideEncryptionKMSConfig sse_kms_config_, const std::shared_ptr & credentials_provider_, - const Aws::Client::ClientConfiguration & client_configuration, - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads, - bool use_virtual_addressing) - : Aws::S3::S3Client(credentials_provider_, client_configuration, std::move(sign_payloads), use_virtual_addressing) + const PocoHTTPClientConfiguration & client_configuration_, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads_, + bool use_virtual_addressing_) + : Aws::S3::S3Client(credentials_provider_, client_configuration_, sign_payloads_, use_virtual_addressing_) , credentials_provider(credentials_provider_) + , client_configuration(client_configuration_) + , sign_payloads(sign_payloads_) + , use_virtual_addressing(use_virtual_addressing_) , max_redirects(max_redirects_) , sse_kms_config(std::move(sse_kms_config_)) , log(&Poco::Logger::get("S3Client")) @@ -175,10 +185,15 @@ Client::Client( ClientCacheRegistry::instance().registerClient(cache); } -Client::Client(const Client & other) - : Aws::S3::S3Client(other) +Client::Client( + const Client & other, const PocoHTTPClientConfiguration & client_configuration_) + : Aws::S3::S3Client(other.credentials_provider, client_configuration_, other.sign_payloads, + other.use_virtual_addressing) , initial_endpoint(other.initial_endpoint) , credentials_provider(other.credentials_provider) + , client_configuration(client_configuration_) + , sign_payloads(other.sign_payloads) + , use_virtual_addressing(other.use_virtual_addressing) , explicit_region(other.explicit_region) , detect_region(other.detect_region) , provider_type(other.provider_type) diff --git a/src/IO/S3/Client.h b/src/IO/S3/Client.h index e1b99c893a68..8904c850553b 100644 --- a/src/IO/S3/Client.h +++ b/src/IO/S3/Client.h @@ -105,6 +105,8 @@ class ClientCacheRegistry class Client : private Aws::S3::S3Client { public: + class RetryStrategy; + /// we use a factory method to verify arguments before creating a client because /// there are certain requirements on arguments for it to work correctly /// e.g. 
Client::RetryStrategy should be used @@ -112,11 +114,19 @@ class Client : private Aws::S3::S3Client size_t max_redirects_, ServerSideEncryptionKMSConfig sse_kms_config_, const std::shared_ptr & credentials_provider, - const Aws::Client::ClientConfiguration & client_configuration, + const PocoHTTPClientConfiguration & client_configuration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads, bool use_virtual_addressing); - static std::unique_ptr create(const Client & other); + /// Create a client with adjusted settings: + /// * override_retry_strategy can be used to disable retries to avoid nested retries when we have + /// a retry loop outside of S3 client. Specifically, for read and write buffers. Currently not + /// actually used. + /// * override_request_timeout_ms is used to increase timeout for CompleteMultipartUploadRequest + /// because it often sits idle for 10 seconds: https://github.com/ClickHouse/ClickHouse/pull/42321 + std::unique_ptr clone( + std::optional> override_retry_strategy = std::nullopt, + std::optional override_request_timeout_ms = std::nullopt) const; Client & operator=(const Client &) = delete; @@ -211,11 +221,12 @@ class Client : private Aws::S3::S3Client Client(size_t max_redirects_, ServerSideEncryptionKMSConfig sse_kms_config_, const std::shared_ptr & credentials_provider_, - const Aws::Client::ClientConfiguration& client_configuration, + const PocoHTTPClientConfiguration & client_configuration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads, bool use_virtual_addressing); - Client(const Client & other); + Client( + const Client & other, const PocoHTTPClientConfiguration & client_configuration); /// Leave regular functions private so we don't accidentally use them /// otherwise region and endpoint redirection won't work @@ -251,6 +262,9 @@ class Client : private Aws::S3::S3Client String initial_endpoint; std::shared_ptr credentials_provider; + PocoHTTPClientConfiguration client_configuration; + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads; + bool use_virtual_addressing; std::string explicit_region; mutable bool detect_region = true; diff --git a/src/IO/S3/tests/gtest_aws_s3_client.cpp b/src/IO/S3/tests/gtest_aws_s3_client.cpp index a9b5fa03f30b..5731e9061d69 100644 --- a/src/IO/S3/tests/gtest_aws_s3_client.cpp +++ b/src/IO/S3/tests/gtest_aws_s3_client.cpp @@ -89,6 +89,7 @@ void doWriteRequest(std::shared_ptr client, const DB::S3:: DB::S3Settings::RequestSettings request_settings; request_settings.max_unexpected_write_error_retries = max_unexpected_write_error_retries; DB::WriteBufferFromS3 write_buffer( + client, client, uri.bucket, uri.key, diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 990505adfb3f..900861a78318 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -77,6 +77,7 @@ struct WriteBufferFromS3::PartData WriteBufferFromS3::WriteBufferFromS3( std::shared_ptr client_ptr_, + std::shared_ptr client_with_long_timeout_ptr_, const String & bucket_, const String & key_, size_t buf_size_, @@ -91,6 +92,7 @@ WriteBufferFromS3::WriteBufferFromS3( , upload_settings(request_settings.getUploadSettings()) , write_settings(write_settings_) , client_ptr(std::move(client_ptr_)) + , client_with_long_timeout_ptr(std::move(client_with_long_timeout_ptr_)) , object_metadata(std::move(object_metadata_)) , buffer_allocation_policy(ChooseBufferPolicy(upload_settings)) , task_tracker( @@ -551,7 +553,7 @@ void WriteBufferFromS3::completeMultipartUpload() 
ProfileEvents::increment(ProfileEvents::DiskS3CompleteMultipartUpload); Stopwatch watch; - auto outcome = client_ptr->CompleteMultipartUpload(req); + auto outcome = client_with_long_timeout_ptr->CompleteMultipartUpload(req); watch.stop(); ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Microseconds, watch.elapsedMicroseconds()); diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index f4200b0a6462..32f4867a4398 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -29,6 +29,8 @@ class WriteBufferFromS3 final : public WriteBufferFromFileBase public: WriteBufferFromS3( std::shared_ptr client_ptr_, + /// for CompleteMultipartUploadRequest, because it blocks on recv() for a few seconds on big uploads + std::shared_ptr client_with_long_timeout_ptr_, const String & bucket_, const String & key_, size_t buf_size_, @@ -86,6 +88,7 @@ class WriteBufferFromS3 final : public WriteBufferFromFileBase const S3Settings::RequestSettings::PartUploadSettings & upload_settings; const WriteSettings write_settings; const std::shared_ptr client_ptr; + const std::shared_ptr client_with_long_timeout_ptr; const std::optional> object_metadata; Poco::Logger * log = &Poco::Logger::get("WriteBufferFromS3"); diff --git a/src/IO/tests/gtest_writebuffer_s3.cpp b/src/IO/tests/gtest_writebuffer_s3.cpp index cd38291fb318..44c0ee67669e 100644 --- a/src/IO/tests/gtest_writebuffer_s3.cpp +++ b/src/IO/tests/gtest_writebuffer_s3.cpp @@ -526,6 +526,7 @@ class WBS3Test : public ::testing::Test getAsyncPolicy().setAutoExecute(false); return std::make_unique( + client, client, bucket, file_name, diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index f4791e45e2b4..135722dbce27 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -150,7 +150,7 @@ class StorageS3Source::DisclosedGlobIterator::Impl : WithContext KeysWithInfo * read_keys_, const S3Settings::RequestSettings & request_settings_) : WithContext(context_) - , client(S3::Client::create(client_)) + , client(client_.clone()) , globbed_uri(globbed_uri_) , query(query_) , virtual_header(virtual_header_) @@ -783,6 +783,7 @@ class StorageS3Sink : public SinkToStorage write_buf = wrapWriteBufferWithCompressionMethod( std::make_unique( configuration_.client, + configuration_.client_with_long_timeout, bucket, key, DBMS_DEFAULT_BUFFER_SIZE, @@ -1296,6 +1297,8 @@ void StorageS3::Configuration::connect(ContextPtr context) context->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS)), auth_settings.no_sign_request.value_or(context->getConfigRef().getBool("s3.no_sign_request", false)), }); + + client_with_long_timeout = client->clone(std::nullopt, request_settings.long_request_timeout_ms); } void StorageS3::processNamedCollectionResult(StorageS3::Configuration & configuration, const NamedCollection & collection) diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h index 130538336230..8d571dd796f0 100644 --- a/src/Storages/StorageS3.h +++ b/src/Storages/StorageS3.h @@ -274,6 +274,7 @@ class StorageS3 : public IStorage HTTPHeaderEntries headers_from_ast; std::shared_ptr client; + std::shared_ptr client_with_long_timeout; std::vector keys; }; diff --git a/src/Storages/StorageS3Settings.cpp b/src/Storages/StorageS3Settings.cpp index 23b4630707cd..89e6ee46b4d7 100644 --- a/src/Storages/StorageS3Settings.cpp +++ b/src/Storages/StorageS3Settings.cpp @@ -199,7 +199,7 @@ S3Settings::RequestSettings::RequestSettings( list_object_keys_size = 
config.getUInt64(key + "list_object_keys_size", settings.s3_list_object_keys_size); throw_on_zero_files_match = config.getBool(key + "throw_on_zero_files_match", settings.s3_throw_on_zero_files_match); retry_attempts = config.getUInt64(key + "retry_attempts", settings.s3_retry_attempts); - request_timeout_ms = config.getUInt64(key + "request_timeout_ms", request_timeout_ms); + request_timeout_ms = config.getUInt64(key + "request_timeout_ms", settings.s3_request_timeout_ms); /// NOTE: it would be better to reuse old throttlers to avoid losing token bucket state on every config reload, /// which could lead to exceeding limit for short time. But it is good enough unless very high `burst` values are used. @@ -255,6 +255,9 @@ void S3Settings::RequestSettings::updateFromSettingsImpl(const Settings & settin if (!if_changed || settings.s3_retry_attempts.changed) retry_attempts = settings.s3_retry_attempts; + + if (!if_changed || settings.s3_request_timeout_ms.changed) + request_timeout_ms = settings.s3_request_timeout_ms; } void S3Settings::RequestSettings::updateFromSettings(const Settings & settings) diff --git a/src/Storages/StorageS3Settings.h b/src/Storages/StorageS3Settings.h index 41489927e7f3..991e323acb6c 100644 --- a/src/Storages/StorageS3Settings.h +++ b/src/Storages/StorageS3Settings.h @@ -69,7 +69,8 @@ struct S3Settings ThrottlerPtr get_request_throttler; ThrottlerPtr put_request_throttler; size_t retry_attempts = 10; - size_t request_timeout_ms = 30000; + size_t request_timeout_ms = 3000; + size_t long_request_timeout_ms = 30000; // TODO: Take this from config like request_timeout_ms bool throw_on_zero_files_match = false; From 1419bb7adbac4603439c02d8e8b68d1338437c48 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 20 Jun 2023 20:31:23 +0200 Subject: [PATCH 034/179] rollback changes in test --- tests/integration/test_merge_tree_s3/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 22805eb6e948..2ccd517923a4 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -923,7 +923,7 @@ def test_merge_canceled_by_s3_errors_when_move(cluster, broken_s3, node_name): @pytest.mark.parametrize("node_name", ["node"]) @pytest.mark.parametrize( - "in_flight_memory", [(10, 288044299), (5, 193557290), (1, 128348733)] + "in_flight_memory", [(10, 245918115), (5, 156786752), (1, 106426187)] ) def test_s3_engine_heavy_write_check_mem( cluster, broken_s3, node_name, in_flight_memory From 2c3a4cb90de34569277edb3e4cf9f50fa9e5d5a2 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 22 Jun 2023 10:47:07 +0000 Subject: [PATCH 035/179] Style fix --- src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index a727a5bc4902..59b0f25f0bfd 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -156,8 +156,7 @@ void CSVFormatReader::skipFieldDelimiter() { skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); - bool res = checkChar(format_settings.csv.delimiter, *buf); - if (!res) + if (!checkChar(format_settings.csv.delimiter, *buf)) { if (!format_settings.csv.missing_as_default) { @@ -165,9 +164,7 @@ void CSVFormatReader::skipFieldDelimiter() 
throwAtAssertionFailed(err, *buf); } else - { current_row_has_missing_fields = true; - } } } From a0fde6a55b3ddb9cac0b3914fc18af58f6419eac Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 22 Jun 2023 10:50:14 +0000 Subject: [PATCH 036/179] Style fix --- .../Formats/Impl/CSVRowInputFormat.cpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 59b0f25f0bfd..edbc33fb3c30 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -156,16 +156,17 @@ void CSVFormatReader::skipFieldDelimiter() { skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); - if (!checkChar(format_settings.csv.delimiter, *buf)) + bool res = checkChar(format_settings.csv.delimiter, *buf); + if (res) + return; + + if (!format_settings.csv.missing_as_default) { - if (!format_settings.csv.missing_as_default) - { - char err[2] = {format_settings.csv.delimiter, '\0'}; - throwAtAssertionFailed(err, *buf); - } - else - current_row_has_missing_fields = true; + char err[2] = {format_settings.csv.delimiter, '\0'}; + throwAtAssertionFailed(err, *buf); } + else + current_row_has_missing_fields = true; } template From 415749f64c6eb6e49fa95ac5038fc689766902ca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 23 Jun 2023 05:09:20 +0200 Subject: [PATCH 037/179] Add a test for #44816 --- ...793_implicit_pretty_format_settings.expect | 27 +++++++++++++++++++ ..._implicit_pretty_format_settings.reference | 0 2 files changed, 27 insertions(+) create mode 100755 tests/queries/0_stateless/02793_implicit_pretty_format_settings.expect create mode 100644 tests/queries/0_stateless/02793_implicit_pretty_format_settings.reference diff --git a/tests/queries/0_stateless/02793_implicit_pretty_format_settings.expect b/tests/queries/0_stateless/02793_implicit_pretty_format_settings.expect new file mode 100755 index 000000000000..569cbc7330e7 --- /dev/null +++ b/tests/queries/0_stateless/02793_implicit_pretty_format_settings.expect @@ -0,0 +1,27 @@ +#!/usr/bin/expect -f + +set basedir [file dirname $argv0] +set basename [file tail $argv0] +exp_internal -f $env(CLICKHOUSE_TMP)/$basename.debuglog 0 +set history_file $env(CLICKHOUSE_TMP)/$basename.history + +log_user 0 +set timeout 60 +match_max 100000 +expect_after { + # Do not ignore eof from expect + -i $any_spawn_id eof { exp_continue } + # A default timeout action is to do nothing, change it to fail + -i $any_spawn_id timeout { exit 1 } +} + +spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion --history_file=$history_file" +expect ":) " + +# Send a command +send -- "SELECT 1 SETTINGS output_format_pretty_row_numbers = 1\r" +expect "1. 
│ 1 │" +expect ":) " + +send -- "\4" +expect eof diff --git a/tests/queries/0_stateless/02793_implicit_pretty_format_settings.reference b/tests/queries/0_stateless/02793_implicit_pretty_format_settings.reference new file mode 100644 index 000000000000..e69de29bb2d1 From 118f02b522420a786b093b3e55fcd404045df8a0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 23 Jun 2023 07:42:01 +0200 Subject: [PATCH 038/179] Add a test for calculate_text_stack_trace setting --- .../02796_calculate_text_stack_trace.reference | 2 ++ .../0_stateless/02796_calculate_text_stack_trace.sql | 8 ++++++++ 2 files changed, 10 insertions(+) create mode 100644 tests/queries/0_stateless/02796_calculate_text_stack_trace.reference create mode 100644 tests/queries/0_stateless/02796_calculate_text_stack_trace.sql diff --git a/tests/queries/0_stateless/02796_calculate_text_stack_trace.reference b/tests/queries/0_stateless/02796_calculate_text_stack_trace.reference new file mode 100644 index 000000000000..b261da18d51a --- /dev/null +++ b/tests/queries/0_stateless/02796_calculate_text_stack_trace.reference @@ -0,0 +1,2 @@ +1 +0 diff --git a/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql new file mode 100644 index 000000000000..3c2806ac0101 --- /dev/null +++ b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql @@ -0,0 +1,8 @@ +SELECT throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SYSTEM FLUSH LOGS; +SELECT length(stack_trace) > 1000 FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; + +SET calculate_text_stack_trace = 0; +SELECT throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SYSTEM FLUSH LOGS; +SELECT length(stack_trace) FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; From 7fc8942ea9d7b15800d9d6ec8355b162013bf32e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 23 Jun 2023 07:50:44 +0200 Subject: [PATCH 039/179] Update test --- .../02796_calculate_text_stack_trace.reference | 4 ++++ .../02796_calculate_text_stack_trace.sql | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02796_calculate_text_stack_trace.reference b/tests/queries/0_stateless/02796_calculate_text_stack_trace.reference index b261da18d51a..c800bbce32bf 100644 --- a/tests/queries/0_stateless/02796_calculate_text_stack_trace.reference +++ b/tests/queries/0_stateless/02796_calculate_text_stack_trace.reference @@ -1,2 +1,6 @@ 1 +1 +1 +0 +0 0 diff --git a/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql index 3c2806ac0101..601bd16fb39e 100644 --- a/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql +++ b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql @@ -1,8 +1,16 @@ -SELECT throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +-- Tags: no-parallel + +TRUNCATE TABLE system.text_log; + +SELECT 'Hello', throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } SYSTEM FLUSH LOGS; -SELECT length(stack_trace) > 1000 FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds 
DESC LIMIT 1; +SELECT length(stack_trace) > 1000 FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'Hello\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; +SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' ORDER BY event_time_microseconds DESC LIMIT 10; + +TRUNCATE TABLE system.text_log; SET calculate_text_stack_trace = 0; -SELECT throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT 'World', throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } SYSTEM FLUSH LOGS; -SELECT length(stack_trace) FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; +SELECT length(stack_trace) FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'World\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; +SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' ORDER BY event_time_microseconds DESC LIMIT 10; From 9680596d36c3aa6a98a3ad899cdb709d87a75b6b Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Fri, 23 Jun 2023 13:03:41 +0200 Subject: [PATCH 040/179] decrease log level, make logs shorter --- src/IO/WriteBufferFromS3.cpp | 76 ++++++++++++++----------- src/IO/WriteBufferFromS3.h | 3 +- src/IO/WriteBufferFromS3TaskTracker.cpp | 5 +- 3 files changed, 48 insertions(+), 36 deletions(-) diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index ebab9b323b86..ff6da9bf4445 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -98,15 +98,13 @@ WriteBufferFromS3::WriteBufferFromS3( std::move(schedule_), upload_settings.max_inflight_parts_for_one_file)) { - LOG_TRACE(log, "Create WriteBufferFromS3, {}", getLogDetails()); + LOG_TRACE(log, "Create WriteBufferFromS3, {}", getShortLogDetails()); allocateBuffer(); } void WriteBufferFromS3::nextImpl() { - LOG_TRACE(log, "nextImpl with incoming data size {}, memory buffer size {}. {}", offset(), memory.size(), getLogDetails()); - if (is_prefinalized) throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -138,7 +136,7 @@ void WriteBufferFromS3::preFinalize() if (is_prefinalized) return; - LOG_TRACE(log, "preFinalize WriteBufferFromS3. {}", getLogDetails()); + LOG_TEST(log, "preFinalize WriteBufferFromS3. {}", getShortLogDetails()); /// This function should not be run again if an exception has occurred is_prefinalized = true; @@ -177,7 +175,7 @@ void WriteBufferFromS3::preFinalize() void WriteBufferFromS3::finalizeImpl() { - LOG_TRACE(log, "finalizeImpl WriteBufferFromS3. {}.", getLogDetails()); + LOG_TRACE(log, "finalizeImpl WriteBufferFromS3. 
{}.", getShortLogDetails()); if (!is_prefinalized) preFinalize(); @@ -206,7 +204,7 @@ void WriteBufferFromS3::finalizeImpl() } } -String WriteBufferFromS3::getLogDetails() const +String WriteBufferFromS3::getVerboseLogDetails() const { String multipart_upload_details; if (!multipart_upload_id.empty()) @@ -217,6 +215,17 @@ String WriteBufferFromS3::getLogDetails() const bucket, key, total_size, count(), hidden_size, offset(), task_tracker->isAsync(), is_prefinalized, finalized, multipart_upload_details); } +String WriteBufferFromS3::getShortLogDetails() const +{ + String multipart_upload_details; + if (!multipart_upload_id.empty()) + multipart_upload_details = fmt::format(", upload id {}" + , multipart_upload_id); + + return fmt::format("Details: bucket {}, key {}, total size {}{}", + bucket, key, total_size, multipart_upload_details); +} + void WriteBufferFromS3::tryToAbortMultipartUpload() { try @@ -226,14 +235,14 @@ void WriteBufferFromS3::tryToAbortMultipartUpload() } catch (...) { - LOG_ERROR(log, "Multipart upload hasn't aborted. {}", getLogDetails()); + LOG_ERROR(log, "Multipart upload hasn't aborted. {}", getVerboseLogDetails()); tryLogCurrentException(__PRETTY_FUNCTION__); } } WriteBufferFromS3::~WriteBufferFromS3() { - LOG_TRACE(log, "Close WriteBufferFromS3. {}.", getLogDetails()); + LOG_TRACE(log, "Close WriteBufferFromS3. {}.", getShortLogDetails()); /// That destructor could be call with finalized=false in case of exceptions if (!finalized) @@ -243,14 +252,14 @@ WriteBufferFromS3::~WriteBufferFromS3() "WriteBufferFromS3 is not finalized in destructor. " "The file might not be written to S3. " "{}.", - getLogDetails()); + getVerboseLogDetails()); } task_tracker->safeWaitAll(); if (!multipart_upload_id.empty() && !multipart_upload_finished) { - LOG_WARNING(log, "WriteBufferFromS3 was neither finished nor aborted, try to abort upload in destructor. {}.", getLogDetails()); + LOG_WARNING(log, "WriteBufferFromS3 was neither finished nor aborted, try to abort upload in destructor. {}.", getVerboseLogDetails()); tryToAbortMultipartUpload(); } } @@ -321,8 +330,6 @@ void WriteBufferFromS3::allocateBuffer() memory = Memory(buffer_allocation_policy->getBufferSize()); WriteBuffer::set(memory.data(), memory.size()); - - LOG_TRACE(log, "Allocated buffer with size {}. {}", buffer_allocation_policy->getBufferSize(), getLogDetails()); } void WriteBufferFromS3::setFakeBufferWhenPreFinalized() @@ -346,7 +353,7 @@ void WriteBufferFromS3::writeMultipartUpload() void WriteBufferFromS3::createMultipartUpload() { - LOG_TRACE(log, "Create multipart upload. Bucket: {}, Key: {}, Upload id: {}", bucket, key, multipart_upload_id); + LOG_TEST(log, "Create multipart upload. {}", getShortLogDetails()); S3::CreateMultipartUploadRequest req; @@ -378,18 +385,18 @@ void WriteBufferFromS3::createMultipartUpload() } multipart_upload_id = outcome.GetResult().GetUploadId(); - LOG_TRACE(log, "Multipart upload has created. {}", getLogDetails()); + LOG_TRACE(log, "Multipart upload has created. {}", getShortLogDetails()); } void WriteBufferFromS3::abortMultipartUpload() { if (multipart_upload_id.empty()) { - LOG_WARNING(log, "Nothing to abort. {}", getLogDetails()); + LOG_WARNING(log, "Nothing to abort. {}", getVerboseLogDetails()); return; } - LOG_WARNING(log, "Abort multipart upload. {}", getLogDetails()); + LOG_WARNING(log, "Abort multipart upload. 
{}", getVerboseLogDetails()); S3::AbortMultipartUploadRequest req; req.SetBucket(bucket); @@ -412,13 +419,12 @@ void WriteBufferFromS3::abortMultipartUpload() throw S3Exception(outcome.GetError().GetMessage(), outcome.GetError().GetErrorType()); } - LOG_WARNING(log, "Multipart upload has aborted successfully. {}", getLogDetails()); + LOG_WARNING(log, "Multipart upload has aborted successfully. {}", getVerboseLogDetails()); } S3::UploadPartRequest WriteBufferFromS3::getUploadRequest(size_t part_number, PartData & data) { ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Bytes, data.data_size); - LOG_TRACE(log, "getUploadRequest, size {}, key: {}", data.data_size, key); S3::UploadPartRequest req; @@ -439,13 +445,13 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) { if (data.data_size == 0) { - LOG_TRACE(log, "Skipping writing part as empty."); + LOG_TEST(log, "Skipping writing part as empty {}", getShortLogDetails()); return; } multipart_tags.push_back({}); size_t part_number = multipart_tags.size(); - LOG_TRACE(log, "writePart {}, part size: {}, part number: {}", getLogDetails(), data.data_size, part_number); + LOG_TEST(log, "writePart {}, part size {}, part number {}", getShortLogDetails(), data.data_size, part_number); if (multipart_upload_id.empty()) throw Exception( @@ -468,11 +474,12 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) { throw Exception( ErrorCodes::LOGICAL_ERROR, - "Part size exceeded max_upload_part_size, part number: {}, part size {}, max_upload_part_size {}, {}", + "Part size exceeded max_upload_part_size. {}, part number {}, part size {}, max_upload_part_size {}", + getShortLogDetails(), part_number, data.data_size, - upload_settings.max_upload_part_size, - getLogDetails()); + upload_settings.max_upload_part_size + ); } auto req = getUploadRequest(part_number, data); @@ -480,7 +487,10 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) auto upload_worker = [&, worker_data, part_number] () { - LOG_TEST(log, "Writing part started. bucket {}, key {}, part id {}", bucket, key, part_number); + auto & data_size = std::get<1>(*worker_data).data_size; + + LOG_TEST(log, "Write part started {}, part size {}, part number {}", + getShortLogDetails(), data_size, part_number); ProfileEvents::increment(ProfileEvents::S3UploadPart); if (write_settings.for_object_storage) @@ -506,7 +516,8 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) multipart_tags[part_number-1] = outcome.GetResult().GetETag(); - LOG_TEST(log, "Writing part finished. bucket {}, key{}, part id {}, etag {}", bucket, key, part_number, multipart_tags[part_number-1]); + LOG_TEST(log, "Write part succeeded {}, part size {}, part number {}, etag {}", + getShortLogDetails(), data_size, part_number, multipart_tags[part_number-1]); }; task_tracker->add(std::move(upload_worker)); @@ -514,7 +525,7 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) void WriteBufferFromS3::completeMultipartUpload() { - LOG_TRACE(log, "Completing multipart upload. {}, Parts: {}", getLogDetails(), multipart_tags.size()); + LOG_TEST(log, "Completing multipart upload. {}, Parts: {}", getShortLogDetails(), multipart_tags.size()); if (multipart_tags.empty()) throw Exception( @@ -559,7 +570,7 @@ void WriteBufferFromS3::completeMultipartUpload() if (outcome.IsSuccess()) { - LOG_TRACE(log, "Multipart upload has completed. {}, Parts: {}", getLogDetails(), multipart_tags.size()); + LOG_TRACE(log, "Multipart upload has completed. 
{}, Parts: {}", getShortLogDetails(), multipart_tags.size()); return; } @@ -569,7 +580,7 @@ void WriteBufferFromS3::completeMultipartUpload() { /// For unknown reason, at least MinIO can respond with NO_SUCH_KEY for put requests /// BTW, NO_SUCH_UPLOAD is expected error and we shouldn't retry it - LOG_INFO(log, "Multipart upload failed with NO_SUCH_KEY error, will retry. {}, Parts: {}", getLogDetails(), multipart_tags.size()); + LOG_INFO(log, "Multipart upload failed with NO_SUCH_KEY error, will retry. {}, Parts: {}", getVerboseLogDetails(), multipart_tags.size()); } else { @@ -589,7 +600,6 @@ void WriteBufferFromS3::completeMultipartUpload() S3::PutObjectRequest WriteBufferFromS3::getPutRequest(PartData & data) { ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Bytes, data.data_size); - LOG_TRACE(log, "getPutRequest, size {}, key {}", data.data_size, key); S3::PutObjectRequest req; @@ -612,14 +622,14 @@ S3::PutObjectRequest WriteBufferFromS3::getPutRequest(PartData & data) void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data) { - LOG_TRACE(log, "Making single part upload. {}.", getLogDetails()); + LOG_TEST(log, "Making single part upload. {}, size {}", getShortLogDetails(), data.data_size); auto req = getPutRequest(data); auto worker_data = std::make_shared>(std::move(req), std::move(data)); auto upload_worker = [&, worker_data] () { - LOG_TEST(log, "writing single part upload started. bucket {}, key {}", bucket, key); + LOG_TEST(log, "writing single part upload started. {}", getShortLogDetails()); auto & request = std::get<0>(*worker_data); size_t content_length = request.GetContentLength(); @@ -642,7 +652,7 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data if (outcome.IsSuccess()) { - LOG_TRACE(log, "Single part upload has completed. bucket {}, key {}, object size {}", bucket, key, content_length); + LOG_TRACE(log, "Single part upload has completed. {}, size {}", getShortLogDetails(), content_length); return; } @@ -653,7 +663,7 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data { /// For unknown reason, at least MinIO can respond with NO_SUCH_KEY for put requests - LOG_INFO(log, "Single part upload failed with NO_SUCH_KEY error for bucket {}, key {}, object size {}, will retry", bucket, key, content_length); + LOG_INFO(log, "Single part upload failed with NO_SUCH_KEY error. {}, size {}, will retry", getShortLogDetails(), content_length); } else { diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index f4200b0a6462..48698df2eb50 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -59,7 +59,8 @@ class WriteBufferFromS3 final : public WriteBufferFromFileBase /// Receives response from the server after sending all data. 
void finalizeImpl() override; - String getLogDetails() const; + String getVerboseLogDetails() const; + String getShortLogDetails() const; struct PartData; void hidePartialData(); diff --git a/src/IO/WriteBufferFromS3TaskTracker.cpp b/src/IO/WriteBufferFromS3TaskTracker.cpp index 2790d71db3d5..f97afe821643 100644 --- a/src/IO/WriteBufferFromS3TaskTracker.cpp +++ b/src/IO/WriteBufferFromS3TaskTracker.cpp @@ -130,8 +130,6 @@ void WriteBufferFromS3::TaskTracker::add(Callback && func) /// this move is nothrow *future_placeholder = scheduler(std::move(func_with_notification), Priority{}); - LOG_TEST(log, "add ended, in queue {}, limit {}", futures.size(), max_tasks_inflight); - waitTilInflightShrink(); } @@ -140,6 +138,9 @@ void WriteBufferFromS3::TaskTracker::waitTilInflightShrink() if (!max_tasks_inflight) return; + if (futures.size() >= max_tasks_inflight) + LOG_TEST(log, "have to wait some tasks finish, in queue {}, limit {}", futures.size(), max_tasks_inflight); + Stopwatch watch; /// Alternative approach is to wait until at least futures.size() - max_tasks_inflight element are finished From 5b6dabdc34e1b6f198a76b489ebf0ff728c4b166 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Fri, 23 Jun 2023 23:02:30 +0200 Subject: [PATCH 041/179] introduce LogSeriesLimiterPtrt --- src/Common/LoggingFormatStringHelpers.cpp | 101 ++++++++++++++++++++++ src/Common/LoggingFormatStringHelpers.h | 28 ++++++ src/Common/logger_useful.h | 3 + src/IO/WriteBufferFromS3.cpp | 33 +++---- src/IO/WriteBufferFromS3.h | 2 + src/IO/WriteBufferFromS3TaskTracker.cpp | 5 +- src/IO/WriteBufferFromS3TaskTracker.h | 6 +- 7 files changed, 158 insertions(+), 20 deletions(-) diff --git a/src/Common/LoggingFormatStringHelpers.cpp b/src/Common/LoggingFormatStringHelpers.cpp index 85659e45791d..648ec034adfc 100644 --- a/src/Common/LoggingFormatStringHelpers.cpp +++ b/src/Common/LoggingFormatStringHelpers.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -74,3 +75,103 @@ void LogFrequencyLimiterIml::cleanup(time_t too_old_threshold_s) std::erase_if(logged_messages, [old](const auto & elem) { return elem.second.first < old; }); last_cleanup = now; } + + + +std::unordered_map> LogSeriesLimiter::series_settings; +std::unordered_map> LogSeriesLimiter::series_loggers; +std::mutex LogSeriesLimiter::mutex; + +LogSeriesLimiter::LogSeriesLimiter(Poco::Logger * logger_, size_t allowed_count_, time_t interval_s_) + : logger(logger_) +{ + if (allowed_count_ == 0) + { + accepted = false; + return; + } + + if (interval_s_ == 0) + { + accepted = true; + return; + } + + time_t now = time(nullptr); + UInt128 name_hash = sipHash128(logger->name().c_str(), logger->name().size()); + + std::lock_guard lock(mutex); + + if (series_settings.contains(name_hash)) + { + auto & settings = series_settings[name_hash]; + auto & [allowed_count, interval_s] = settings; + chassert(allowed_count_ == allowed_count); + chassert(interval_s_ == interval_s); + } + else + { + series_settings[name_hash] = std::make_tuple(allowed_count_, interval_s_); + } + + auto register_as_first = [&] () TSA_REQUIRES(mutex) + { + assert(allowed_count_ > 0); + accepted = true; + series_loggers[name_hash] = std::make_tuple(now, 1, 1); + }; + + + if (!series_loggers.contains(name_hash)) + { + register_as_first(); + return; + } + + auto & [last_time, accepted_count, total_count] = series_loggers[name_hash]; + if (last_time + interval_s_ <= now) + { + debug_message = fmt::format( + " (LogSeriesLimiter: on interval from {} to {} accepted series {} / {} for the logger {} : {})", 
+ DateLUT::instance().timeToString(last_time), + DateLUT::instance().timeToString(now), + accepted_count, + total_count, + logger->name(), + double(name_hash)); + + register_as_first(); + return; + } + + if (accepted_count < allowed_count_) + { + accepted = true; + ++accepted_count; + } + ++total_count; +} + +void LogSeriesLimiter::log(Poco::Message & message) +{ + std::string_view pattern = message.getFormatString(); + if (pattern.empty()) + { + /// Do not filter messages without a format string + if (auto * channel = logger->getChannel()) + channel->log(message); + return; + } + + if (!accepted) + return; + + if (!debug_message.empty()) + { + message.appendText(debug_message); + debug_message.clear(); + } + + if (auto * channel = logger->getChannel()) + channel->log(message); +} diff --git a/src/Common/LoggingFormatStringHelpers.h b/src/Common/LoggingFormatStringHelpers.h index b29510a2c933..5dece8cd6ea2 100644 --- a/src/Common/LoggingFormatStringHelpers.h +++ b/src/Common/LoggingFormatStringHelpers.h @@ -191,6 +191,34 @@ class LogFrequencyLimiterIml Poco::Logger * getLogger() { return logger; } }; +/// This wrapper helps to avoid too noisy log messages from similar objects. +/// For the value logger_name it remembers when such a message was logged the last time. +class LogSeriesLimiter +{ + static std::mutex mutex; + + /// Hash(logger_name) -> (allowed_count, interval_s) + static std::unordered_map> series_settings TSA_GUARDED_BY(mutex); + + /// Hash(logger_name) -> (last_logged_time_s, accepted, muted) + static std::unordered_map> series_loggers TSA_GUARDED_BY(mutex); + + Poco::Logger * logger = nullptr; + bool accepted = false; + String debug_message; +public: + LogSeriesLimiter(Poco::Logger * logger_, size_t allowed_count_, time_t interval_s_); + + LogSeriesLimiter & operator -> () { return *this; } + bool is(Poco::Message::Priority priority) { return logger->is(priority); } + LogSeriesLimiter * getChannel() {return this; } + const String & name() const { return logger->name(); } + + void log(Poco::Message & message); + + Poco::Logger * getLogger() { return logger; } +}; + /// This wrapper is useful to save formatted message into a String before sending it to a logger class LogToStrImpl { diff --git a/src/Common/logger_useful.h b/src/Common/logger_useful.h index 3ac950cbdfbd..3ebb1d25075c 100644 --- a/src/Common/logger_useful.h +++ b/src/Common/logger_useful.h @@ -15,12 +15,15 @@ namespace Poco { class Logger; } #define LogToStr(x, y) std::make_unique(x, y) #define LogFrequencyLimiter(x, y) std::make_unique(x, y) +using LogSeriesLimiterPtr = std::shared_ptr; + namespace { [[maybe_unused]] const ::Poco::Logger * getLogger(const ::Poco::Logger * logger) { return logger; } [[maybe_unused]] const ::Poco::Logger * getLogger(const std::atomic<::Poco::Logger *> & logger) { return logger.load(); } [[maybe_unused]] std::unique_ptr getLogger(std::unique_ptr && logger) { return logger; } [[maybe_unused]] std::unique_ptr getLogger(std::unique_ptr && logger) { return logger; } + [[maybe_unused]] LogSeriesLimiterPtr getLogger(LogSeriesLimiterPtr & logger) { return logger; } } #define LOG_IMPL_FIRST_ARG(X, ...) 
X diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index ff6da9bf4445..8714282f7a8f 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -96,9 +96,10 @@ WriteBufferFromS3::WriteBufferFromS3( , task_tracker( std::make_unique( std::move(schedule_), - upload_settings.max_inflight_parts_for_one_file)) + upload_settings.max_inflight_parts_for_one_file, + limitedLog)) { - LOG_TRACE(log, "Create WriteBufferFromS3, {}", getShortLogDetails()); + LOG_TRACE(limitedLog, "Create WriteBufferFromS3, {}", getShortLogDetails()); allocateBuffer(); } @@ -136,7 +137,7 @@ void WriteBufferFromS3::preFinalize() if (is_prefinalized) return; - LOG_TEST(log, "preFinalize WriteBufferFromS3. {}", getShortLogDetails()); + LOG_TEST(limitedLog, "preFinalize WriteBufferFromS3. {}", getShortLogDetails()); /// This function should not be run again if an exception has occurred is_prefinalized = true; @@ -175,7 +176,7 @@ void WriteBufferFromS3::preFinalize() void WriteBufferFromS3::finalizeImpl() { - LOG_TRACE(log, "finalizeImpl WriteBufferFromS3. {}.", getShortLogDetails()); + LOG_TRACE(limitedLog, "finalizeImpl WriteBufferFromS3. {}.", getShortLogDetails()); if (!is_prefinalized) preFinalize(); @@ -242,7 +243,7 @@ void WriteBufferFromS3::tryToAbortMultipartUpload() WriteBufferFromS3::~WriteBufferFromS3() { - LOG_TRACE(log, "Close WriteBufferFromS3. {}.", getShortLogDetails()); + LOG_TRACE(limitedLog, "Close WriteBufferFromS3. {}.", getShortLogDetails()); /// That destructor could be call with finalized=false in case of exceptions if (!finalized) @@ -353,7 +354,7 @@ void WriteBufferFromS3::writeMultipartUpload() void WriteBufferFromS3::createMultipartUpload() { - LOG_TEST(log, "Create multipart upload. {}", getShortLogDetails()); + LOG_TEST(limitedLog, "Create multipart upload. {}", getShortLogDetails()); S3::CreateMultipartUploadRequest req; @@ -385,7 +386,7 @@ void WriteBufferFromS3::createMultipartUpload() } multipart_upload_id = outcome.GetResult().GetUploadId(); - LOG_TRACE(log, "Multipart upload has created. {}", getShortLogDetails()); + LOG_TRACE(limitedLog, "Multipart upload has created. 
{}", getShortLogDetails()); } void WriteBufferFromS3::abortMultipartUpload() @@ -445,13 +446,13 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) { if (data.data_size == 0) { - LOG_TEST(log, "Skipping writing part as empty {}", getShortLogDetails()); + LOG_TEST(limitedLog, "Skipping writing part as empty {}", getShortLogDetails()); return; } multipart_tags.push_back({}); size_t part_number = multipart_tags.size(); - LOG_TEST(log, "writePart {}, part size {}, part number {}", getShortLogDetails(), data.data_size, part_number); + LOG_TEST(limitedLog, "writePart {}, part size {}, part number {}", getShortLogDetails(), data.data_size, part_number); if (multipart_upload_id.empty()) throw Exception( @@ -489,7 +490,7 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) { auto & data_size = std::get<1>(*worker_data).data_size; - LOG_TEST(log, "Write part started {}, part size {}, part number {}", + LOG_TEST(limitedLog, "Write part started {}, part size {}, part number {}", getShortLogDetails(), data_size, part_number); ProfileEvents::increment(ProfileEvents::S3UploadPart); @@ -516,7 +517,7 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) multipart_tags[part_number-1] = outcome.GetResult().GetETag(); - LOG_TEST(log, "Write part succeeded {}, part size {}, part number {}, etag {}", + LOG_TEST(limitedLog, "Write part succeeded {}, part size {}, part number {}, etag {}", getShortLogDetails(), data_size, part_number, multipart_tags[part_number-1]); }; @@ -525,7 +526,7 @@ void WriteBufferFromS3::writePart(WriteBufferFromS3::PartData && data) void WriteBufferFromS3::completeMultipartUpload() { - LOG_TEST(log, "Completing multipart upload. {}, Parts: {}", getShortLogDetails(), multipart_tags.size()); + LOG_TEST(limitedLog, "Completing multipart upload. {}, Parts: {}", getShortLogDetails(), multipart_tags.size()); if (multipart_tags.empty()) throw Exception( @@ -570,7 +571,7 @@ void WriteBufferFromS3::completeMultipartUpload() if (outcome.IsSuccess()) { - LOG_TRACE(log, "Multipart upload has completed. {}, Parts: {}", getShortLogDetails(), multipart_tags.size()); + LOG_TRACE(limitedLog, "Multipart upload has completed. {}, Parts: {}", getShortLogDetails(), multipart_tags.size()); return; } @@ -622,14 +623,14 @@ S3::PutObjectRequest WriteBufferFromS3::getPutRequest(PartData & data) void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data) { - LOG_TEST(log, "Making single part upload. {}, size {}", getShortLogDetails(), data.data_size); + LOG_TEST(limitedLog, "Making single part upload. {}, size {}", getShortLogDetails(), data.data_size); auto req = getPutRequest(data); auto worker_data = std::make_shared>(std::move(req), std::move(data)); auto upload_worker = [&, worker_data] () { - LOG_TEST(log, "writing single part upload started. {}", getShortLogDetails()); + LOG_TEST(limitedLog, "writing single part upload started. {}", getShortLogDetails()); auto & request = std::get<0>(*worker_data); size_t content_length = request.GetContentLength(); @@ -652,7 +653,7 @@ void WriteBufferFromS3::makeSinglepartUpload(WriteBufferFromS3::PartData && data if (outcome.IsSuccess()) { - LOG_TRACE(log, "Single part upload has completed. {}, size {}", getShortLogDetails(), content_length); + LOG_TRACE(limitedLog, "Single part upload has completed. 
{}, size {}", getShortLogDetails(), content_length); return; } diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index 48698df2eb50..590342cc997f 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -5,6 +5,7 @@ #if USE_AWS_S3 #include +#include #include #include #include @@ -89,6 +90,7 @@ class WriteBufferFromS3 final : public WriteBufferFromFileBase const std::shared_ptr client_ptr; const std::optional> object_metadata; Poco::Logger * log = &Poco::Logger::get("WriteBufferFromS3"); + LogSeriesLimiterPtr limitedLog = std::make_shared(log, 1, 5); IBufferAllocationPolicyPtr buffer_allocation_policy; diff --git a/src/IO/WriteBufferFromS3TaskTracker.cpp b/src/IO/WriteBufferFromS3TaskTracker.cpp index f97afe821643..bce122dd6c8c 100644 --- a/src/IO/WriteBufferFromS3TaskTracker.cpp +++ b/src/IO/WriteBufferFromS3TaskTracker.cpp @@ -12,10 +12,11 @@ namespace ProfileEvents namespace DB { -WriteBufferFromS3::TaskTracker::TaskTracker(ThreadPoolCallbackRunner scheduler_, size_t max_tasks_inflight_) +WriteBufferFromS3::TaskTracker::TaskTracker(ThreadPoolCallbackRunner scheduler_, size_t max_tasks_inflight_, LogSeriesLimiterPtr limitedLog_) : is_async(bool(scheduler_)) , scheduler(scheduler_ ? std::move(scheduler_) : syncRunner()) , max_tasks_inflight(max_tasks_inflight_) + , limitedLog(limitedLog_) {} WriteBufferFromS3::TaskTracker::~TaskTracker() @@ -139,7 +140,7 @@ void WriteBufferFromS3::TaskTracker::waitTilInflightShrink() return; if (futures.size() >= max_tasks_inflight) - LOG_TEST(log, "have to wait some tasks finish, in queue {}, limit {}", futures.size(), max_tasks_inflight); + LOG_TEST(limitedLog, "have to wait some tasks finish, in queue {}, limit {}", futures.size(), max_tasks_inflight); Stopwatch watch; diff --git a/src/IO/WriteBufferFromS3TaskTracker.h b/src/IO/WriteBufferFromS3TaskTracker.h index c3f4628b946f..815e041ae52b 100644 --- a/src/IO/WriteBufferFromS3TaskTracker.h +++ b/src/IO/WriteBufferFromS3TaskTracker.h @@ -6,6 +6,8 @@ #include "WriteBufferFromS3.h" +#include + #include namespace DB @@ -25,7 +27,7 @@ class WriteBufferFromS3::TaskTracker public: using Callback = std::function; - TaskTracker(ThreadPoolCallbackRunner scheduler_, size_t max_tasks_inflight_); + TaskTracker(ThreadPoolCallbackRunner scheduler_, size_t max_tasks_inflight_, LogSeriesLimiterPtr limitedLog_); ~TaskTracker(); static ThreadPoolCallbackRunner syncRunner(); @@ -57,7 +59,7 @@ class WriteBufferFromS3::TaskTracker using FutureList = std::list>; FutureList futures; - Poco::Logger * log = &Poco::Logger::get("TaskTracker"); + LogSeriesLimiterPtr limitedLog; std::mutex mutex; std::condition_variable has_finished TSA_GUARDED_BY(mutex); From 80aa8863e5330bf5774e569d0f92307bf98bad52 Mon Sep 17 00:00:00 2001 From: Sema Checherinda <104093494+CheSema@users.noreply.github.com> Date: Sat, 24 Jun 2023 00:12:30 +0200 Subject: [PATCH 042/179] Update LoggingFormatStringHelpers.cpp --- src/Common/LoggingFormatStringHelpers.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Common/LoggingFormatStringHelpers.cpp b/src/Common/LoggingFormatStringHelpers.cpp index 648ec034adfc..ed578018d5f9 100644 --- a/src/Common/LoggingFormatStringHelpers.cpp +++ b/src/Common/LoggingFormatStringHelpers.cpp @@ -77,7 +77,6 @@ void LogFrequencyLimiterIml::cleanup(time_t too_old_threshold_s) } - std::unordered_map> LogSeriesLimiter::series_settings; std::unordered_map> LogSeriesLimiter::series_loggers; std::mutex LogSeriesLimiter::mutex; From b8ede5262a5df9b4db2c25ecebc7818f763f9e9c Mon Sep 17 00:00:00 
2001 From: Alexey Milovidov Date: Sat, 24 Jun 2023 07:51:49 +0200 Subject: [PATCH 043/179] Add a test for #42691 --- .../0_stateless/02802_with_cube_with_totals.reference | 8 ++++++++ tests/queries/0_stateless/02802_with_cube_with_totals.sql | 2 ++ 2 files changed, 10 insertions(+) create mode 100644 tests/queries/0_stateless/02802_with_cube_with_totals.reference create mode 100644 tests/queries/0_stateless/02802_with_cube_with_totals.sql diff --git a/tests/queries/0_stateless/02802_with_cube_with_totals.reference b/tests/queries/0_stateless/02802_with_cube_with_totals.reference new file mode 100644 index 000000000000..c7b7b5704560 --- /dev/null +++ b/tests/queries/0_stateless/02802_with_cube_with_totals.reference @@ -0,0 +1,8 @@ +((2147483648,(-0,1.1754943508222875e-38,2147483646,'-9223372036854775808',NULL))) 0 +((2147483648,(-0,1.1754943508222875e-38,2147483646,'-9223372036854775808',NULL))) 0 + +((2147483648,(-0,1.1754943508222875e-38,2147483646,'-9223372036854775808',NULL))) 0 +\N +\N + +\N diff --git a/tests/queries/0_stateless/02802_with_cube_with_totals.sql b/tests/queries/0_stateless/02802_with_cube_with_totals.sql new file mode 100644 index 000000000000..77adb68eb4b7 --- /dev/null +++ b/tests/queries/0_stateless/02802_with_cube_with_totals.sql @@ -0,0 +1,2 @@ +SELECT tuple((2147483648, (-0., 1.1754943508222875e-38, 2147483646, '-9223372036854775808', NULL))), toInt128(0.0001) GROUP BY ((256, toInt64(1.1754943508222875e-38), NULL), NULL, -0., ((65535, '-92233720368547758.07'), 0.9999), tuple(((1., 3.4028234663852886e38, '1', 0.5), NULL, tuple('0.1')))) WITH CUBE WITH TOTALS; +SELECT NULL GROUP BY toUUID(NULL, '0', NULL, '0.0000065535'), 1 WITH CUBE WITH TOTALS; From f0aee54dab32b1db35171c526525354a7701e21a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 24 Jun 2023 07:57:40 +0200 Subject: [PATCH 044/179] Add a test for #32474 --- tests/queries/0_stateless/02804_intersect_bad_cast.reference | 0 tests/queries/0_stateless/02804_intersect_bad_cast.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 tests/queries/0_stateless/02804_intersect_bad_cast.reference create mode 100644 tests/queries/0_stateless/02804_intersect_bad_cast.sql diff --git a/tests/queries/0_stateless/02804_intersect_bad_cast.reference b/tests/queries/0_stateless/02804_intersect_bad_cast.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02804_intersect_bad_cast.sql b/tests/queries/0_stateless/02804_intersect_bad_cast.sql new file mode 100644 index 000000000000..c7eb8fdd3bce --- /dev/null +++ b/tests/queries/0_stateless/02804_intersect_bad_cast.sql @@ -0,0 +1 @@ +SELECT 2., * FROM (SELECT 1024, 256 INTERSECT SELECT 100 AND inf, 256); From fa6df80aa204ec4e9f2d872eba0a2c7baee2cce4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 24 Jun 2023 08:05:45 +0200 Subject: [PATCH 045/179] Add a test for #35801 --- .../02807_default_date_time_nullable.reference | 2 ++ .../02807_default_date_time_nullable.sql | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 tests/queries/0_stateless/02807_default_date_time_nullable.reference create mode 100644 tests/queries/0_stateless/02807_default_date_time_nullable.sql diff --git a/tests/queries/0_stateless/02807_default_date_time_nullable.reference b/tests/queries/0_stateless/02807_default_date_time_nullable.reference new file mode 100644 index 000000000000..d103460bff74 --- /dev/null +++ b/tests/queries/0_stateless/02807_default_date_time_nullable.reference @@ -0,0 +1,2 @@ +1 
1977-01-01 00:00:00 +1 1977-01-01 00:00:00 diff --git a/tests/queries/0_stateless/02807_default_date_time_nullable.sql b/tests/queries/0_stateless/02807_default_date_time_nullable.sql new file mode 100644 index 000000000000..9152f1987876 --- /dev/null +++ b/tests/queries/0_stateless/02807_default_date_time_nullable.sql @@ -0,0 +1,18 @@ +create temporary table test ( + data int, + default Nullable(DateTime) DEFAULT '1977-01-01 00:00:00' +) engine = Memory(); + +insert into test (data) select 1; + +select * from test; + +drop temporary table test; + +create temporary table test ( + data int, + default DateTime DEFAULT '1977-01-01 00:00:00' +) engine = Memory(); +insert into test (data) select 1; + +select * from test; From 0c7a4142e40b186da12c3ac3f0664cb3a94e979f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 24 Jun 2023 20:57:39 +0200 Subject: [PATCH 046/179] Use separate default settings for clickhouse-local There are already two of them: - storage_file_read_method can use mmap method for clickhouse-local - there is no sense in disabling allow_introspection_functions for clickhouse-local since it can hurt only itself And likely there will be more, once the infrastructure will be there. Signed-off-by: Azat Khuzhin --- src/Core/Settings.h | 2 +- src/Core/SettingsOverridesLocal.cpp | 13 +++++++++++++ src/Core/SettingsOverridesLocal.h | 11 +++++++++++ src/Interpreters/Context.cpp | 3 +++ ...2800_clickhouse_local_default_settings.reference | 2 ++ .../02800_clickhouse_local_default_settings.sh | 8 ++++++++ 6 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 src/Core/SettingsOverridesLocal.cpp create mode 100644 src/Core/SettingsOverridesLocal.h create mode 100644 tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference create mode 100755 tests/queries/0_stateless/02800_clickhouse_local_default_settings.sh diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 3d42bd582ed4..c51076f32375 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -657,7 +657,7 @@ class IColumn; M(UInt64, function_range_max_elements_in_block, 500000000, "Maximum number of values generated by function 'range' per block of data (sum of array sizes for every row in a block, see also 'max_block_size' and 'min_insert_block_size_rows'). It is a safety threshold.", 0) \ M(ShortCircuitFunctionEvaluation, short_circuit_function_evaluation, ShortCircuitFunctionEvaluation::ENABLE, "Setting for short-circuit function evaluation configuration. Possible values: 'enable' - use short-circuit function evaluation for functions that are suitable for it, 'disable' - disable short-circuit function evaluation, 'force_enable' - use short-circuit function evaluation for all functions.", 0) \ \ - M(LocalFSReadMethod, storage_file_read_method, LocalFSReadMethod::mmap, "Method of reading data from storage file, one of: read, pread, mmap. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local).", 0) \ + M(LocalFSReadMethod, storage_file_read_method, LocalFSReadMethod::pread, "Method of reading data from storage file, one of: read, pread, mmap. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local).", 0) \ M(String, local_filesystem_read_method, "pread_threadpool", "Method of reading data from local filesystem, one of: read, pread, mmap, io_uring, pread_threadpool. 
The 'io_uring' method is experimental and does not work for Log, TinyLog, StripeLog, File, Set and Join, and other tables with append-able files in presence of concurrent reads and writes.", 0) \ M(String, remote_filesystem_read_method, "threadpool", "Method of reading data from remote filesystem, one of: read, threadpool.", 0) \ M(Bool, local_filesystem_read_prefetch, false, "Should use prefetching when reading data from local filesystem.", 0) \ diff --git a/src/Core/SettingsOverridesLocal.cpp b/src/Core/SettingsOverridesLocal.cpp new file mode 100644 index 000000000000..2beb560ece29 --- /dev/null +++ b/src/Core/SettingsOverridesLocal.cpp @@ -0,0 +1,13 @@ +#include +#include + +namespace DB +{ + +void applySettingsOverridesForLocal(Settings & settings) +{ + settings.allow_introspection_functions = true; + settings.storage_file_read_method = LocalFSReadMethod::mmap; +} + +} diff --git a/src/Core/SettingsOverridesLocal.h b/src/Core/SettingsOverridesLocal.h new file mode 100644 index 000000000000..89b79f4ad55a --- /dev/null +++ b/src/Core/SettingsOverridesLocal.h @@ -0,0 +1,11 @@ +#pragma once + +namespace DB +{ + +struct Settings; + +/// Update some settings defaults for clickhouse-local +void applySettingsOverridesForLocal(Settings & settings); + +} diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 106264320b2f..dccdf4efca00 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -3646,6 +3647,8 @@ void Context::setDefaultProfiles(const Poco::Util::AbstractConfiguration & confi setCurrentProfile(shared->system_profile_name); applySettingsQuirks(settings, &Poco::Logger::get("SettingsQuirks")); + if (shared->application_type == ApplicationType::LOCAL) + applySettingsOverridesForLocal(settings); shared->buffer_profile_name = config.getString("buffer_profile", shared->system_profile_name); buffer_context = Context::createCopy(shared_from_this()); diff --git a/tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference b/tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference new file mode 100644 index 000000000000..0f18d1a3897d --- /dev/null +++ b/tests/queries/0_stateless/02800_clickhouse_local_default_settings.reference @@ -0,0 +1,2 @@ +allow_introspection_functions 1 +storage_file_read_method mmap diff --git a/tests/queries/0_stateless/02800_clickhouse_local_default_settings.sh b/tests/queries/0_stateless/02800_clickhouse_local_default_settings.sh new file mode 100755 index 000000000000..792e187fc512 --- /dev/null +++ b/tests/queries/0_stateless/02800_clickhouse_local_default_settings.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Tags: no-random-settings, no-random-merge-tree-settings + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "select name, value from system.settings where changed" From a7b14f87e0b43f02fac2cd216e906b045dbbfa42 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 24 Jun 2023 21:14:28 +0200 Subject: [PATCH 047/179] Throw an error instead of silenty ignore storage_file_read_method=mmap in server Signed-off-by: Azat Khuzhin --- src/Storages/StorageFile.cpp | 8 ++++---- .../0_stateless/02497_storage_file_reader_selection.sh | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index ff67272e542c..5301b159f966 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -205,7 +205,7 @@ std::unique_ptr selectReadBuffer( { auto read_method = context->getSettingsRef().storage_file_read_method; - /** But using mmap on server-side is unsafe for the following reasons: + /** Using mmap on server-side is unsafe for the following reasons: * - concurrent modifications of a file will result in SIGBUS; * - IO error from the device will result in SIGBUS; * - recovery from this signal is not feasible even with the usage of siglongjmp, @@ -214,10 +214,10 @@ std::unique_ptr selectReadBuffer( * * But we keep this mode for clickhouse-local as it is not so bad for a command line tool. */ + if (context->getApplicationType() == Context::ApplicationType::SERVER && read_method == LocalFSReadMethod::mmap) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Using storage_file_read_method=mmap is not safe in server mode. Consider using pread."); - if (S_ISREG(file_stat.st_mode) - && context->getApplicationType() != Context::ApplicationType::SERVER - && read_method == LocalFSReadMethod::mmap) + if (S_ISREG(file_stat.st_mode) && read_method == LocalFSReadMethod::mmap) { try { diff --git a/tests/queries/0_stateless/02497_storage_file_reader_selection.sh b/tests/queries/0_stateless/02497_storage_file_reader_selection.sh index 20bde68718de..25387e61db60 100755 --- a/tests/queries/0_stateless/02497_storage_file_reader_selection.sh +++ b/tests/queries/0_stateless/02497_storage_file_reader_selection.sh @@ -13,4 +13,6 @@ $CLICKHOUSE_LOCAL --storage_file_read_method=mmap --print-profile-events -q "SEL $CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -c "CreatedReadBufferMMap" $CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -c "CreatedReadBufferOrdinary" +$CLICKHOUSE_CLIENT --storage_file_read_method=mmap -nq "SELECT * FROM file('/dev/null', 'LineAsString') FORMAT Null -- { serverError BAD_ARGUMENTS }" + rm $DATA_FILE From 59f11863d7776134c383b168a1ec7ff2acc8bc16 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 24 Jun 2023 21:41:33 +0200 Subject: [PATCH 048/179] Simplify settings overrides or clickhouse-local Signed-off-by: Azat Khuzhin --- programs/local/LocalServer.cpp | 16 +++++++++++++++- src/Core/SettingsOverridesLocal.cpp | 13 ------------- src/Core/SettingsOverridesLocal.h | 11 ----------- src/Interpreters/Context.cpp | 3 --- 4 files changed, 15 insertions(+), 28 deletions(-) delete mode 100644 src/Core/SettingsOverridesLocal.cpp delete mode 100644 src/Core/SettingsOverridesLocal.h diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index caca7cfb50d2..033d2b91ec64 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -71,6 +71,15 @@ namespace ErrorCodes 
extern const int FILE_ALREADY_EXISTS; } +void applySettingsOverridesForLocal(ContextMutablePtr context) +{ + Settings settings = context->getSettings(); + + settings.allow_introspection_functions = true; + settings.storage_file_read_method = LocalFSReadMethod::mmap; + + context->setSettings(settings); +} void LocalServer::processError(const String &) const { @@ -657,6 +666,12 @@ void LocalServer::processConfig() CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size, compiled_expression_cache_elements_size); #endif + /// NOTE: it is important to apply any overrides before + /// setDefaultProfiles() calls since it will copy current context (i.e. + /// there is separate context for Buffer tables). + applySettingsOverridesForLocal(global_context); + applyCmdOptions(global_context); + /// Load global settings from default_profile and system_profile. global_context->setDefaultProfiles(config()); @@ -671,7 +686,6 @@ void LocalServer::processConfig() std::string default_database = config().getString("default_database", "_local"); DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, global_context)); global_context->setCurrentDatabase(default_database); - applyCmdOptions(global_context); if (config().has("path")) { diff --git a/src/Core/SettingsOverridesLocal.cpp b/src/Core/SettingsOverridesLocal.cpp deleted file mode 100644 index 2beb560ece29..000000000000 --- a/src/Core/SettingsOverridesLocal.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include -#include - -namespace DB -{ - -void applySettingsOverridesForLocal(Settings & settings) -{ - settings.allow_introspection_functions = true; - settings.storage_file_read_method = LocalFSReadMethod::mmap; -} - -} diff --git a/src/Core/SettingsOverridesLocal.h b/src/Core/SettingsOverridesLocal.h deleted file mode 100644 index 89b79f4ad55a..000000000000 --- a/src/Core/SettingsOverridesLocal.h +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -namespace DB -{ - -struct Settings; - -/// Update some settings defaults for clickhouse-local -void applySettingsOverridesForLocal(Settings & settings); - -} diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index dccdf4efca00..106264320b2f 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -48,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -3647,8 +3646,6 @@ void Context::setDefaultProfiles(const Poco::Util::AbstractConfiguration & confi setCurrentProfile(shared->system_profile_name); applySettingsQuirks(settings, &Poco::Logger::get("SettingsQuirks")); - if (shared->application_type == ApplicationType::LOCAL) - applySettingsOverridesForLocal(settings); shared->buffer_profile_name = config.getString("buffer_profile", shared->system_profile_name); buffer_context = Context::createCopy(shared_from_this()); From 940cf69ce436107415c3990088738b83dfb201c7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 25 Jun 2023 07:30:32 +0200 Subject: [PATCH 049/179] Add a test for #43358 --- tests/queries/0_stateless/02809_has_token.reference | 1 + tests/queries/0_stateless/02809_has_token.sql | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 tests/queries/0_stateless/02809_has_token.reference create mode 100644 tests/queries/0_stateless/02809_has_token.sql diff --git a/tests/queries/0_stateless/02809_has_token.reference b/tests/queries/0_stateless/02809_has_token.reference new file mode 100644 index 000000000000..573541ac9702 --- /dev/null +++ 
b/tests/queries/0_stateless/02809_has_token.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02809_has_token.sql b/tests/queries/0_stateless/02809_has_token.sql new file mode 100644 index 000000000000..08edf3756d1f --- /dev/null +++ b/tests/queries/0_stateless/02809_has_token.sql @@ -0,0 +1,3 @@ +-- in old versions of ClickHouse, the following query returned a wrong result: + +SELECT hasToken('quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaq
quota', 'quota') AS r;

From 79a03432bf688c9f6f29554f7b9548e2b36b2178 Mon Sep 17 00:00:00 2001
From: Sema Checherinda
Date: Sun, 25 Jun 2023 13:27:07 +0200
Subject: [PATCH 050/179] add test, add comment

---
 src/Common/LoggingFormatStringHelpers.h | 5 ++++-
 src/Daemon/BaseDaemon.cpp | 2 ++
 src/IO/tests/gtest_writebuffer_s3.cpp | 28 +++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/src/Common/LoggingFormatStringHelpers.h b/src/Common/LoggingFormatStringHelpers.h
index 5dece8cd6ea2..82c260e52a68 100644
--- a/src/Common/LoggingFormatStringHelpers.h
+++ b/src/Common/LoggingFormatStringHelpers.h
@@ -192,7 +192,10 @@ class LogFrequencyLimiterIml
 };
 
 /// This wrapper helps to avoid too noisy log messages from similar objects.
-/// For the value logger_name it remembers when such a message was logged the last time.
+/// Once an instance of LogSeriesLimiter is created, the print-or-mute decision is made once:
+/// all subsequent messages that use this instance are either printed or muted together.
+/// LogSeriesLimiter differs from LogFrequencyLimiterIml in that LogSeriesLimiter
+/// accepts or mutes a whole series of logs, while LogFrequencyLimiterIml treats each line independently.
 class LogSeriesLimiter
 {
     static std::mutex mutex;
diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp
index bfd5568b71d9..6a6175b802f2 100644
--- a/src/Daemon/BaseDaemon.cpp
+++ b/src/Daemon/BaseDaemon.cpp
@@ -417,6 +417,8 @@ class SignalListener : public Poco::Runnable
     {
         SentryWriter::onFault(sig, error_message, stack_trace);
 
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunreachable-code"
         /// Advice the user to send it manually.
if constexpr (std::string_view(VERSION_OFFICIAL).contains("official build")) { diff --git a/src/IO/tests/gtest_writebuffer_s3.cpp b/src/IO/tests/gtest_writebuffer_s3.cpp index cd38291fb318..a4433fee60ec 100644 --- a/src/IO/tests/gtest_writebuffer_s3.cpp +++ b/src/IO/tests/gtest_writebuffer_s3.cpp @@ -1119,4 +1119,32 @@ TEST_P(SyncAsync, IncreaseLimited) { } } +TEST_P(SyncAsync, StrictUploadPartSize) { + getSettings().s3_check_objects_after_upload = false; + + { + getSettings().s3_max_single_part_upload_size = 10; + getSettings().s3_strict_upload_part_size = 11; + + { + auto counters = MockS3::EventCounts{.multiUploadCreate = 1, .multiUploadComplete = 1, .uploadParts = 6}; + runSimpleScenario(counters, 66); + + auto actual_parts_sizes = MockS3::BucketMemStore::GetPartSizes(getCompletedPartUploads().back().second); + ASSERT_THAT(actual_parts_sizes, testing::ElementsAre(11, 11, 11, 11, 11, 11)); + + // parts: 11 22 33 44 55 66 + // size: 11 11 11 11 11 11 + } + + { + auto counters = MockS3::EventCounts{.multiUploadCreate = 1, .multiUploadComplete = 1, .uploadParts = 7}; + runSimpleScenario(counters, 67); + + auto actual_parts_sizes = MockS3::BucketMemStore::GetPartSizes(getCompletedPartUploads().back().second); + ASSERT_THAT(actual_parts_sizes, testing::ElementsAre(11, 11, 11, 11, 11, 11, 1)); + } + } +} + #endif From f13752a2805baf77a00d1ad0f50094e553a27f17 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Sun, 25 Jun 2023 13:29:41 +0200 Subject: [PATCH 051/179] delete 02720_s3_strict_upload_part_size --- ...02720_s3_strict_upload_part_size.reference | 4 --- .../02720_s3_strict_upload_part_size.sh | 25 ------------------- 2 files changed, 29 deletions(-) delete mode 100644 tests/queries/0_stateless/02720_s3_strict_upload_part_size.reference delete mode 100755 tests/queries/0_stateless/02720_s3_strict_upload_part_size.sh diff --git a/tests/queries/0_stateless/02720_s3_strict_upload_part_size.reference b/tests/queries/0_stateless/02720_s3_strict_upload_part_size.reference deleted file mode 100644 index f7c4ece5f1f6..000000000000 --- a/tests/queries/0_stateless/02720_s3_strict_upload_part_size.reference +++ /dev/null @@ -1,4 +0,0 @@ -part size: 6000001, part number: 1 -part size: 6000001, part number: 2 -part size: 6000001, part number: 3 -part size: 2971517, part number: 4 diff --git a/tests/queries/0_stateless/02720_s3_strict_upload_part_size.sh b/tests/queries/0_stateless/02720_s3_strict_upload_part_size.sh deleted file mode 100755 index 9799ef0478a1..000000000000 --- a/tests/queries/0_stateless/02720_s3_strict_upload_part_size.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long -# Tag no-fasttest: requires S3 - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -in="$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.in" -out="$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.out" -log="$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.log" - -set -e -trap 'rm -f "${out:?}" "${in:?}" "${log:?}"' EXIT - -# Generate a file of 20MiB in size, with our part size it will have 4 parts -# NOTE: 1 byte is for new line, so 1023 not 1024 -$CLICKHOUSE_LOCAL -q "SELECT randomPrintableASCII(1023) FROM numbers(20*1024) FORMAT LineAsString" > "$in" - -$CLICKHOUSE_CLIENT --send_logs_level=trace --server_logs_file="$log" -q "INSERT INTO FUNCTION s3(s3_conn, filename='$CLICKHOUSE_TEST_UNIQUE_NAME', format='LineAsString', structure='line String') FORMAT LineAsString" --s3_strict_upload_part_size=6000001 < "$in" -grep -F '' "$log" || : -grep -o 'WriteBufferFromS3: writePart.*, part size: .*' "$log" | grep -o 'part size: .*' -$CLICKHOUSE_CLIENT -q "SELECT * FROM s3(s3_conn, filename='$CLICKHOUSE_TEST_UNIQUE_NAME', format='LineAsString', structure='line String') FORMAT LineAsString" > "$out" - -diff -q "$in" "$out" From dd3d2c9aeaa5798467521eaf2fc85f2332a07a6a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 27 Jun 2023 08:01:15 +0200 Subject: [PATCH 052/179] Fix syntax error --- tests/integration/test_attach_table_normalizer/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index ba0068e9c59c..49acefdcd177 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -4,7 +4,7 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( - 'node', main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True + "node", main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True ) @@ -18,13 +18,13 @@ def started_cluster(): def replace_substring_to_substr(node): - node.exec_in_container(( + node.exec_in_container( [ "bash", "-c", "sed -i 's/substring/substr/g' /var/lib/clickhouse/metadata/default/file.sql", ], - user="root" + user="root", ) From 40f721ae4f290c76d492260d740c1eb37df20e4c Mon Sep 17 00:00:00 2001 From: serxa Date: Tue, 27 Jun 2023 17:14:33 +0000 Subject: [PATCH 053/179] fix possible race on shutdown wait --- programs/server/Server.cpp | 4 ++-- src/Server/waitServersToFinish.cpp | 11 +++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index d2d8a0d07fb7..41df7a119d1e 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1523,7 +1523,7 @@ try LOG_INFO(log, "Closed all listening sockets."); if (current_connections > 0) - current_connections = waitServersToFinish(servers_to_start_before_tables, config().getInt("shutdown_wait_unfinished", 5)); + current_connections = waitServersToFinish(servers_to_start_before_tables, servers_lock, config().getInt("shutdown_wait_unfinished", 5)); if (current_connections) LOG_INFO(log, "Closed connections to servers for tables. But {} remain. 
Probably some tables of other users cannot finish their connections after context shutdown.", current_connections); @@ -1827,7 +1827,7 @@ try global_context->getProcessList().killAllQueries(); if (current_connections) - current_connections = waitServersToFinish(servers, config().getInt("shutdown_wait_unfinished", 5)); + current_connections = waitServersToFinish(servers, servers_lock, config().getInt("shutdown_wait_unfinished", 5)); if (current_connections) LOG_WARNING(log, "Closed connections. But {} remain." diff --git a/src/Server/waitServersToFinish.cpp b/src/Server/waitServersToFinish.cpp index f2e36fae86cb..3b07c0820672 100644 --- a/src/Server/waitServersToFinish.cpp +++ b/src/Server/waitServersToFinish.cpp @@ -5,7 +5,7 @@ namespace DB { -size_t waitServersToFinish(std::vector & servers, size_t seconds_to_wait) +size_t waitServersToFinish(std::vector & servers, std::mutex & mutex, size_t seconds_to_wait) { const size_t sleep_max_ms = 1000 * seconds_to_wait; const size_t sleep_one_ms = 100; @@ -15,10 +15,13 @@ size_t waitServersToFinish(std::vector & servers, siz { current_connections = 0; - for (auto & server : servers) { - server.stop(); - current_connections += server.currentConnections(); + std::scoped_lock lock{mutex}; + for (auto & server : servers) + { + server.stop(); + current_connections += server.currentConnections(); + } } if (!current_connections) From 7583da9b3806850a3ed99e7b93f253c17ddb5aa8 Mon Sep 17 00:00:00 2001 From: serxa Date: Tue, 27 Jun 2023 18:48:54 +0000 Subject: [PATCH 054/179] fix --- src/Server/waitServersToFinish.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Server/waitServersToFinish.h b/src/Server/waitServersToFinish.h index 5e90790cefb5..b6daa0259646 100644 --- a/src/Server/waitServersToFinish.h +++ b/src/Server/waitServersToFinish.h @@ -5,6 +5,6 @@ namespace DB { class ProtocolServerAdapter; -size_t waitServersToFinish(std::vector & servers, size_t seconds_to_wait); +size_t waitServersToFinish(std::vector & servers, std::mutex & mutex, size_t seconds_to_wait); } From 6515d52f6018570560eeb56d93d05ca1b530a892 Mon Sep 17 00:00:00 2001 From: serxa Date: Tue, 27 Jun 2023 18:50:40 +0000 Subject: [PATCH 055/179] fix2 --- programs/keeper/Keeper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index a18256651888..43c3489bbda8 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -465,7 +465,7 @@ try LOG_INFO(log, "Closed all listening sockets."); if (current_connections > 0) - current_connections = waitServersToFinish(*servers, config().getInt("shutdown_wait_unfinished", 5)); + current_connections = waitServersToFinish(*servers, servers_lock, config().getInt("shutdown_wait_unfinished", 5)); if (current_connections) LOG_INFO(log, "Closed connections to Keeper. But {} remain. 
Probably some users cannot finish their connections after context shutdown.", current_connections); From 13854e5259ee446c7b76be2db619bd22fd6491bb Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 27 Jun 2023 17:23:51 +0200 Subject: [PATCH 056/179] impl --- src/Processors/QueryPlan/PartsSplitter.cpp | 6 +- ...nal_block_structure_mismatch_bug.reference | 9 +++ ...791_final_block_structure_mismatch_bug.sql | 66 +++++++++++++++++++ 3 files changed, 78 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference create mode 100644 tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index 9796e696f6c2..e1fc3facf04d 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -271,6 +271,9 @@ Pipes buildPipesForReadingByPKRanges( for (size_t i = 0; i < result_layers.size(); ++i) { pipes[i] = reading_step_getter(std::move(result_layers[i])); + auto pk_expression = std::make_shared(primary_key.expression->getActionsDAG().clone()); + pipes[i].addSimpleTransform([pk_expression](const Block & header) + { return std::make_shared(header, pk_expression); }); auto & filter_function = filters[i]; if (!filter_function) continue; @@ -279,9 +282,6 @@ Pipes buildPipesForReadingByPKRanges( ExpressionActionsPtr expression_actions = std::make_shared(std::move(actions)); auto description = fmt::format( "filter values in [{}, {})", i ? ::toString(borders[i - 1]) : "-inf", i < borders.size() ? ::toString(borders[i]) : "+inf"); - auto pk_expression = std::make_shared(primary_key.expression->getActionsDAG().clone()); - pipes[i].addSimpleTransform([pk_expression](const Block & header) - { return std::make_shared(header, pk_expression); }); pipes[i].addSimpleTransform( [&](const Block & header) { diff --git a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference new file mode 100644 index 000000000000..a8401b1cae8f --- /dev/null +++ b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference @@ -0,0 +1,9 @@ +1 +2 +3 +1 +2 +3 +1 +2 +3 diff --git a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql new file mode 100644 index 000000000000..4c7ac50b8d0c --- /dev/null +++ b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql @@ -0,0 +1,66 @@ +SET do_not_merge_across_partitions_select_final=1; + +CREATE TABLE test_block_mismatch +( + a UInt32, + b DateTime +) +ENGINE = ReplacingMergeTree +PARTITION BY toYYYYMM(b) +ORDER BY (toDate(b), a); + +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch FINAL; + +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-02-02 12:12:12')); +SELECT count(*) FROM test_block_mismatch FINAL; + +INSERT INTO test_block_mismatch VALUES (2, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch VALUES (2, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch FINAL; + +CREATE TABLE test_block_mismatch_sk1 +( + a UInt32, + b DateTime +) +ENGINE = ReplacingMergeTree 
+PARTITION BY toYYYYMM(b) +PRIMARY KEY (toDate(b)) +ORDER BY (toDate(b), a); + +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk1 FINAL; + +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-02-02 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk1 FINAL; + +INSERT INTO test_block_mismatch_sk1 VALUES (2, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk1 VALUES (2, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk1 FINAL; + +CREATE TABLE test_block_mismatch_sk2 +( + a UInt32, + b DateTime +) +ENGINE = ReplacingMergeTree +PARTITION BY toYYYYMM(b) +PRIMARY KEY (a) +ORDER BY (a, toDate(b)); + +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk2 FINAL; + +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-02-02 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk2 FINAL; + +INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk2 FINAL; From e2f20ea0e2b012796e05f1e734152609b34167e7 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 28 Jun 2023 00:30:51 +0200 Subject: [PATCH 057/179] fix --- src/Processors/QueryPlan/PartsSplitter.cpp | 6 ++--- src/Processors/QueryPlan/PartsSplitter.h | 1 + .../QueryPlan/ReadFromMergeTree.cpp | 22 +++++++++--------- ...nal_block_structure_mismatch_bug.reference | 1 + ...791_final_block_structure_mismatch_bug.sql | 23 +++++++++++++++++++ 5 files changed, 39 insertions(+), 14 deletions(-) diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index e1fc3facf04d..533fbde1e130 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -256,6 +256,7 @@ namespace ErrorCodes Pipes buildPipesForReadingByPKRanges( const KeyDescription & primary_key, + ExpressionActionsPtr sorting_expr, RangesInDataParts parts, size_t max_layers, ContextPtr context, @@ -271,9 +272,8 @@ Pipes buildPipesForReadingByPKRanges( for (size_t i = 0; i < result_layers.size(); ++i) { pipes[i] = reading_step_getter(std::move(result_layers[i])); - auto pk_expression = std::make_shared(primary_key.expression->getActionsDAG().clone()); - pipes[i].addSimpleTransform([pk_expression](const Block & header) - { return std::make_shared(header, pk_expression); }); + pipes[i].addSimpleTransform([sorting_expr](const Block & header) + { return std::make_shared(header, sorting_expr); }); auto & filter_function = filters[i]; if (!filter_function) continue; diff --git a/src/Processors/QueryPlan/PartsSplitter.h b/src/Processors/QueryPlan/PartsSplitter.h index 56bca688c2d4..4ba655a6f6d0 100644 --- a/src/Processors/QueryPlan/PartsSplitter.h +++ b/src/Processors/QueryPlan/PartsSplitter.h @@ -18,6 +18,7 @@ using ReadingInOrderStepGetter = std::function; /// Will try to produce exactly max_layer pipes but may return less if data is distributed in not a very parallelizable way. 
Pipes buildPipesForReadingByPKRanges( const KeyDescription & primary_key, + ExpressionActionsPtr sorting_expr, RangesInDataParts parts, size_t max_layers, ContextPtr context, diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 3c38ecbbd3fb..fac8ebd6e1f8 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -979,6 +979,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( RangesInDataParts lonely_parts; size_t sum_marks_in_lonely_parts = 0; + auto sorting_expr = std::make_shared(metadata_for_reading->getSortingKey().expression->getActionsDAG().clone()); + for (size_t range_index = 0; range_index < parts_to_merge_ranges.size() - 1; ++range_index) { Pipes pipes; @@ -1022,12 +1024,20 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( info.use_uncompressed_cache); }; pipes = buildPipesForReadingByPKRanges( - metadata_for_reading->getPrimaryKey(), std::move(new_parts), num_streams, context, std::move(reading_step_getter)); + metadata_for_reading->getPrimaryKey(), + sorting_expr, + std::move(new_parts), + num_streams, + context, + std::move(reading_step_getter)); } else { pipes.emplace_back(read( std::move(new_parts), column_names, ReadFromMergeTree::ReadType::InOrder, num_streams, 0, info.use_uncompressed_cache)); + + pipes.back().addSimpleTransform([sorting_expr](const Block & header) + { return std::make_shared(header, sorting_expr); }); } /// Drop temporary columns, added by 'sorting_key_expr' @@ -1035,13 +1045,6 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( out_projection = createProjection(pipes.front().getHeader()); } - auto sorting_expr = std::make_shared( - metadata_for_reading->getSortingKey().expression->getActionsDAG().clone()); - - for (auto & pipe : pipes) - pipe.addSimpleTransform([sorting_expr](const Block & header) - { return std::make_shared(header, sorting_expr); }); - /// If do_not_merge_across_partitions_select_final is true and there is only one part in partition /// with level > 0 then we won't postprocess this part if (settings.do_not_merge_across_partitions_select_final && @@ -1098,9 +1101,6 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( if (!out_projection) out_projection = createProjection(pipe.getHeader()); - auto sorting_expr = std::make_shared( - metadata_for_reading->getSortingKey().expression->getActionsDAG().clone()); - pipe.addSimpleTransform([sorting_expr](const Block & header) { return std::make_shared(header, sorting_expr); diff --git a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference index a8401b1cae8f..ca810c46a2db 100644 --- a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference +++ b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference @@ -7,3 +7,4 @@ 1 2 3 +2 diff --git a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql index 4c7ac50b8d0c..a82e43d81f43 100644 --- a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql +++ b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql @@ -64,3 +64,26 @@ SELECT count(*) FROM test_block_mismatch_sk2 FINAL; INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 12:12:12')); INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 
12:12:12')); SELECT count(*) FROM test_block_mismatch_sk2 FINAL; + +CREATE TABLE test_block_mismatch_magic_row_dist +( + a UInt32, + b DateTime +) +ENGINE = ReplacingMergeTree +PARTITION BY toYYYYMM(b) +ORDER BY (toDate(b), a); + +INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); + +optimize table test_block_mismatch_magic_row_dist final; + +system stop merges test_block_mismatch_magic_row_dist; + +INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-01-01 12:12:12')); + +SELECT count(*) FROM test_block_mismatch_magic_row_dist FINAL; From 521137c55d18f956c86cf71b1ca7bca2601f7d70 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Jun 2023 06:28:21 +0300 Subject: [PATCH 058/179] Update test.py --- tests/integration/test_attach_table_normalizer/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index 49acefdcd177..10b400494ab8 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -4,7 +4,7 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( - "node", main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True + "node", stay_alive=True ) From 5df6f3d6e28483a029f3a8859c8bd09fdab008a0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 28 Jun 2023 03:40:09 +0000 Subject: [PATCH 059/179] Automatic style fix --- tests/integration/test_attach_table_normalizer/test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/test_attach_table_normalizer/test.py b/tests/integration/test_attach_table_normalizer/test.py index 10b400494ab8..79093bf40142 100644 --- a/tests/integration/test_attach_table_normalizer/test.py +++ b/tests/integration/test_attach_table_normalizer/test.py @@ -3,9 +3,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance( - "node", stay_alive=True -) +node = cluster.add_instance("node", stay_alive=True) @pytest.fixture(scope="module") From 7e6d606b1c6b5277b1420a509cf841d1c1120ffc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Jun 2023 10:41:39 +0200 Subject: [PATCH 060/179] Remove bad code and fix a bug --- src/Common/QueryProfiler.cpp | 9 +++ src/Common/StringSearcher.h | 89 +---------------------- src/Common/Volnitsky.h | 3 - src/Functions/HasTokenImpl.h | 45 ++++++++---- src/Functions/hasToken.cpp | 5 +- src/Functions/hasTokenCaseInsensitive.cpp | 5 +- 6 files changed, 48 insertions(+), 108 deletions(-) diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 085c8fb8af49..313d4b77739d 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -120,6 +120,15 @@ void Timer::createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal throw Exception(ErrorCodes::CANNOT_CREATE_TIMER, "Failed to create thread timer. The function " "'timer_create' returned non-zero but didn't set errno. 
This is bug in your OS."); + /// For example, it cannot be created if the server is run under QEMU: + /// "Failed to create thread timer, errno: 11, strerror: Resource temporarily unavailable." + + /// You could accidentally run the server under QEMU without being aware, + /// if you use Docker image for a different architecture, + /// and you have the "binfmt-misc" kernel module, and "qemu-user" tools. + + /// Also, it cannot be created if the server has too many threads. + throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER); } timer_id.emplace(local_timer_id); diff --git a/src/Common/StringSearcher.h b/src/Common/StringSearcher.h index 3ed192d05f33..b3065354f654 100644 --- a/src/Common/StringSearcher.h +++ b/src/Common/StringSearcher.h @@ -793,88 +793,6 @@ class StringSearcher : public StringSearcherBase } }; - -// Searches for needle surrounded by token-separators. -// Separators are anything inside ASCII (0-128) and not alphanum. -// Any value outside of basic ASCII (>=128) is considered a non-separator symbol, hence UTF-8 strings -// should work just fine. But any Unicode whitespace is not considered a token separtor. -template -class TokenSearcher : public StringSearcherBase -{ - StringSearcher searcher; - size_t needle_size; - -public: - - template - requires (sizeof(CharT) == 1) - static bool isValidNeedle(const CharT * needle_, size_t needle_size_) - { - return std::none_of(needle_, needle_ + needle_size_, isTokenSeparator); - } - - template - requires (sizeof(CharT) == 1) - TokenSearcher(const CharT * needle_, size_t needle_size_) - : searcher(needle_, needle_size_) - , needle_size(needle_size_) - { - /// The caller is responsible for calling isValidNeedle() - chassert(isValidNeedle(needle_, needle_size_)); - } - - template - requires (sizeof(CharT) == 1) - ALWAYS_INLINE bool compare(const CharT * haystack, const CharT * haystack_end, const CharT * pos) const - { - // use searcher only if pos is in the beginning of token and pos + searcher.needle_size is end of token. - if (isToken(haystack, haystack_end, pos)) - return searcher.compare(haystack, haystack_end, pos); - - return false; - } - - template - requires (sizeof(CharT) == 1) - const CharT * search(const CharT * haystack, const CharT * const haystack_end) const - { - // use searcher.search(), then verify that returned value is a token - // if it is not, skip it and re-run - - const auto * pos = haystack; - while (pos < haystack_end) - { - pos = searcher.search(pos, haystack_end); - if (pos == haystack_end || isToken(haystack, haystack_end, pos)) - return pos; - - // assuming that heendle does not contain any token separators. 
- pos += needle_size; - } - return haystack_end; - } - - template - requires (sizeof(CharT) == 1) - const CharT * search(const CharT * haystack, size_t haystack_size) const - { - return search(haystack, haystack + haystack_size); - } - - template - requires (sizeof(CharT) == 1) - ALWAYS_INLINE bool isToken(const CharT * haystack, const CharT * const haystack_end, const CharT* p) const - { - return (p == haystack || isTokenSeparator(*(p - 1))) - && (p + needle_size >= haystack_end || isTokenSeparator(*(p + needle_size))); - } - - ALWAYS_INLINE static bool isTokenSeparator(const uint8_t c) - { - return !(isAlphaNumericASCII(c) || !isASCII(c)); - } -}; - } using ASCIICaseSensitiveStringSearcher = impl::StringSearcher; @@ -882,9 +800,6 @@ using ASCIICaseInsensitiveStringSearcher = impl::StringSearcher; using UTF8CaseSensitiveStringSearcher = impl::StringSearcher; using UTF8CaseInsensitiveStringSearcher = impl::StringSearcher; -using ASCIICaseSensitiveTokenSearcher = impl::TokenSearcher; -using ASCIICaseInsensitiveTokenSearcher = impl::TokenSearcher; - /// Use only with short haystacks where cheap initialization is required. template struct StdLibASCIIStringSearcher @@ -906,11 +821,11 @@ struct StdLibASCIIStringSearcher if constexpr (CaseInsensitive) return std::search( haystack_start, haystack_end, needle_start, needle_end, - [](char c1, char c2) {return std::toupper(c1) == std::toupper(c2);}); + [](char c1, char c2) { return std::toupper(c1) == std::toupper(c2); }); else return std::search( haystack_start, haystack_end, needle_start, needle_end, - [](char c1, char c2) {return c1 == c2;}); + [](char c1, char c2) { return c1 == c2; }); } template diff --git a/src/Common/Volnitsky.h b/src/Common/Volnitsky.h index 8f9aa23a38aa..3360c197984f 100644 --- a/src/Common/Volnitsky.h +++ b/src/Common/Volnitsky.h @@ -730,9 +730,6 @@ using VolnitskyUTF8 = VolnitskyBase; /// ignores non-ASCII bytes using VolnitskyCaseInsensitiveUTF8 = VolnitskyBase; -using VolnitskyCaseSensitiveToken = VolnitskyBase; -using VolnitskyCaseInsensitiveToken = VolnitskyBase; - using MultiVolnitsky = MultiVolnitskyBase; using MultiVolnitskyUTF8 = MultiVolnitskyBase; using MultiVolnitskyCaseInsensitive = MultiVolnitskyBase; diff --git a/src/Functions/HasTokenImpl.h b/src/Functions/HasTokenImpl.h index 8cacdfff99d9..fdec5fcb0b7b 100644 --- a/src/Functions/HasTokenImpl.h +++ b/src/Functions/HasTokenImpl.h @@ -17,7 +17,7 @@ namespace ErrorCodes /** Token search the string, means that needle must be surrounded by some separator chars, like whitespace or puctuation. */ -template +template struct HasTokenImpl { using ResultType = UInt8; @@ -46,7 +46,7 @@ struct HasTokenImpl const UInt8 * const end = haystack_data.data() + haystack_data.size(); const UInt8 * pos = begin; - if (!ASCIICaseSensitiveTokenSearcher::isValidNeedle(pattern.data(), pattern.size())) + if (!std::none_of(pattern.begin(), pattern.end(), isTokenSeparator)) { if (res_null) { @@ -58,7 +58,8 @@ struct HasTokenImpl throw Exception(ErrorCodes::BAD_ARGUMENTS, "Needle must not contain whitespace or separator characters"); } - TokenSearcher searcher(pattern.data(), pattern.size(), end - pos); + size_t pattern_size = pattern.size(); + Searcher searcher(pattern.data(), pattern_size, end - pos); if (res_null) std::ranges::fill(res_null->getData(), false); @@ -67,21 +68,31 @@ struct HasTokenImpl /// We will search for the next occurrence in all rows at once. while (pos < end && end != (pos = searcher.search(pos, end - pos))) { - /// Let's determine which index it refers to. 
- while (begin + haystack_offsets[i] <= pos) + /// The found substring is a token + if ((pos == begin || isTokenSeparator(pos[-1])) + && (pos + pattern_size == end || isTokenSeparator(pos[pattern_size]))) { - res[i] = negate; + /// Let's determine which index it refers to. + while (begin + haystack_offsets[i] <= pos) + { + res[i] = negate; + ++i; + } + + /// We check that the entry does not pass through the boundaries of strings. + if (pos + pattern.size() < begin + haystack_offsets[i]) + res[i] = !negate; + else + res[i] = negate; + + pos = begin + haystack_offsets[i]; ++i; } - - /// We check that the entry does not pass through the boundaries of strings. - if (pos + pattern.size() < begin + haystack_offsets[i]) - res[i] = !negate; else - res[i] = negate; - - pos = begin + haystack_offsets[i]; - ++i; + { + /// Not a token. Jump over it. + pos += pattern_size; + } } /// Tail, in which there can be no substring. @@ -113,6 +124,12 @@ struct HasTokenImpl { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Function '{}' doesn't support FixedString haystack argument", name); } + +private: + static bool isTokenSeparator(UInt8 c) + { + return isASCII(c) && !isAlphaNumericASCII(c); + } }; } diff --git a/src/Functions/hasToken.cpp b/src/Functions/hasToken.cpp index b90750ea233d..fa41abf26415 100644 --- a/src/Functions/hasToken.cpp +++ b/src/Functions/hasToken.cpp @@ -6,6 +6,7 @@ namespace DB { + struct NameHasToken { static constexpr auto name = "hasToken"; @@ -17,9 +18,9 @@ struct NameHasTokenOrNull }; using FunctionHasToken - = FunctionsStringSearch>; + = FunctionsStringSearch>; using FunctionHasTokenOrNull - = FunctionsStringSearch, ExecutionErrorPolicy::Null>; + = FunctionsStringSearch, ExecutionErrorPolicy::Null>; REGISTER_FUNCTION(HasToken) { diff --git a/src/Functions/hasTokenCaseInsensitive.cpp b/src/Functions/hasTokenCaseInsensitive.cpp index d7381e336b56..32675b9384d5 100644 --- a/src/Functions/hasTokenCaseInsensitive.cpp +++ b/src/Functions/hasTokenCaseInsensitive.cpp @@ -6,6 +6,7 @@ namespace DB { + struct NameHasTokenCaseInsensitive { static constexpr auto name = "hasTokenCaseInsensitive"; @@ -17,9 +18,9 @@ struct NameHasTokenCaseInsensitiveOrNull }; using FunctionHasTokenCaseInsensitive - = FunctionsStringSearch>; + = FunctionsStringSearch>; using FunctionHasTokenCaseInsensitiveOrNull - = FunctionsStringSearch, ExecutionErrorPolicy::Null>; + = FunctionsStringSearch, ExecutionErrorPolicy::Null>; REGISTER_FUNCTION(HasTokenCaseInsensitive) { From 23d0a9e3a83f263f563c0d2b0983bff6aa9a2d90 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 28 Jun 2023 16:20:45 +0200 Subject: [PATCH 061/179] fix --- .../01861_explain_pipeline.reference | 18 +++++----- ...inal_streams_data_skipping_index.reference | 36 +++++++++---------- 2 files changed, 24 insertions(+), 30 deletions(-) diff --git a/tests/queries/0_stateless/01861_explain_pipeline.reference b/tests/queries/0_stateless/01861_explain_pipeline.reference index aec3ae06dce3..427b3eaefc08 100644 --- a/tests/queries/0_stateless/01861_explain_pipeline.reference +++ b/tests/queries/0_stateless/01861_explain_pipeline.reference @@ -17,14 +17,12 @@ ExpressionTransform × 2 (ReadFromMergeTree) ExpressionTransform × 2 ReplacingSorted - ExpressionTransform - FilterSortedStreamByRange - Description: filter values in [(5), +inf) - ExpressionTransform - MergeTreeInOrder 0 → 1 - ReplacingSorted 2 → 1 + FilterSortedStreamByRange + Description: filter values in [(5), +inf) + ExpressionTransform + MergeTreeInOrder 0 → 1 + ReplacingSorted 2 → 1 + 
FilterSortedStreamByRange × 2 + Description: filter values in [-inf, (5)) ExpressionTransform × 2 - FilterSortedStreamByRange × 2 - Description: filter values in [-inf, (5)) - ExpressionTransform × 2 - MergeTreeInOrder × 2 0 → 1 + MergeTreeInOrder × 2 0 → 1 diff --git a/tests/queries/0_stateless/02780_final_streams_data_skipping_index.reference b/tests/queries/0_stateless/02780_final_streams_data_skipping_index.reference index d7a540ae479c..5242c6253250 100644 --- a/tests/queries/0_stateless/02780_final_streams_data_skipping_index.reference +++ b/tests/queries/0_stateless/02780_final_streams_data_skipping_index.reference @@ -9,17 +9,15 @@ ExpressionTransform × 2 (ReadFromMergeTree) ExpressionTransform × 2 AggregatingSortedTransform 2 → 1 - ExpressionTransform × 2 - FilterSortedStreamByRange × 2 - Description: filter values in [(999424), +inf) - ExpressionTransform × 2 - MergeTreeInOrder × 2 0 → 1 - AggregatingSortedTransform + FilterSortedStreamByRange × 2 + Description: filter values in [(999424), +inf) + ExpressionTransform × 2 + MergeTreeInOrder × 2 0 → 1 + AggregatingSortedTransform + FilterSortedStreamByRange + Description: filter values in [-inf, (999424)) ExpressionTransform - FilterSortedStreamByRange - Description: filter values in [-inf, (999424)) - ExpressionTransform - MergeTreeInOrder 0 → 1 + MergeTreeInOrder 0 → 1 EXPLAIN PIPELINE SELECT * FROM data FINAL WHERE v1 >= now() - INTERVAL 180 DAY SETTINGS max_threads=2, max_final_threads=2, force_data_skipping_indices='v1_index', use_skip_indexes_if_final=0 FORMAT LineAsString; @@ -30,14 +28,12 @@ ExpressionTransform × 2 (ReadFromMergeTree) ExpressionTransform × 2 AggregatingSortedTransform 2 → 1 - ExpressionTransform × 2 - FilterSortedStreamByRange × 2 - Description: filter values in [(999424), +inf) - ExpressionTransform × 2 - MergeTreeInOrder × 2 0 → 1 - AggregatingSortedTransform + FilterSortedStreamByRange × 2 + Description: filter values in [(999424), +inf) + ExpressionTransform × 2 + MergeTreeInOrder × 2 0 → 1 + AggregatingSortedTransform + FilterSortedStreamByRange + Description: filter values in [-inf, (999424)) ExpressionTransform - FilterSortedStreamByRange - Description: filter values in [-inf, (999424)) - ExpressionTransform - MergeTreeInOrder 0 → 1 + MergeTreeInOrder 0 → 1 From c9fad7b1410740d7ada64b65dfda5fefbe4a45ff Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 28 Jun 2023 18:40:48 +0200 Subject: [PATCH 062/179] Don't run 02782_uniq_exact_parallel_merging_bug in parallel with other tests --- .../0_stateless/02782_uniq_exact_parallel_merging_bug.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh b/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh index d84ffd21b87a..a7f71eacf0f9 100755 --- a/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh +++ b/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh @@ -1,10 +1,8 @@ #!/usr/bin/env bash -# Tags: long, no-random-settings, no-tsan, no-asan, no-ubsan, no-msan +# Tags: long, no-random-settings, no-tsan, no-asan, no-ubsan, no-msan, no-parallel # shellcheck disable=SC2154 -unset CLICKHOUSE_LOG_COMMENT - CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh From 7f8ad3d5cbab240a5ef4d75b55f55478ceed22e0 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 28 Jun 2023 17:48:54 +0200 Subject: [PATCH 063/179] Convert assert to LOGICAL_ERROR in createBlockSelector() for zero weight Signed-off-by: Azat Khuzhin --- src/Interpreters/createBlockSelector.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/createBlockSelector.cpp b/src/Interpreters/createBlockSelector.cpp index 659fc483373b..a8eb39e6c9d6 100644 --- a/src/Interpreters/createBlockSelector.cpp +++ b/src/Interpreters/createBlockSelector.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include @@ -12,13 +13,19 @@ namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + template IColumn::Selector createBlockSelector( const IColumn & column, const std::vector & slots) { const auto total_weight = slots.size(); - assert(total_weight != 0); + if (total_weight == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "weight is zero"); size_t num_rows = column.size(); IColumn::Selector selector(num_rows); From c9adfe1efd9aa0210185eecfbc9d446f4060077f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 28 Jun 2023 17:53:14 +0200 Subject: [PATCH 064/179] Prohibit cluster with zero weight across all shards Before it leads to SIGSEGV, due to either divizion by zero or an a check in libdivide. Signed-off-by: Azat Khuzhin --- src/Interpreters/Cluster.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index edbef77ef024..89bfb70f7c52 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -30,6 +30,7 @@ namespace ErrorCodes extern const int SYNTAX_ERROR; extern const int INVALID_SHARD_ID; extern const int NO_SUCH_REPLICA; + extern const int BAD_ARGUMENTS; } namespace @@ -614,6 +615,12 @@ Poco::Timespan Cluster::saturate(Poco::Timespan v, Poco::Timespan limit) void Cluster::initMisc() { + /// NOTE: It is possible to have cluster w/o shards for + /// optimize_skip_unused_shards (i.e. WHERE 0 expression), so check the + /// slots only if shards is not empty. 
+ if (!shards_info.empty() && slot_to_shard.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cluster with zero weight on all shards is prohibited"); + for (const auto & shard_info : shards_info) { if (!shard_info.isLocal() && !shard_info.hasRemoteConnections()) From 2a12fb42461f0916455a9efd8fd9b5ada4edca69 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 28 Jun 2023 17:57:53 +0200 Subject: [PATCH 065/179] Initialize weight/slot_to_shards for cluster not from xml correcty This is: - clusterAllReplicas - copier - some distributed cases Signed-off-by: Azat Khuzhin --- src/Interpreters/Cluster.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 89bfb70f7c52..891586d88b66 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -525,7 +525,7 @@ Cluster::Cluster( addresses_with_failover.emplace_back(current); - addShard(settings, std::move(current), params.treat_local_as_remote, current_shard_num); + addShard(settings, std::move(current), params.treat_local_as_remote, current_shard_num, /* insert_paths= */ {}, /* weight= */ 1); ++current_shard_num; } @@ -553,7 +553,7 @@ Cluster::Cluster( addresses_with_failover.emplace_back(current); - addShard(settings, std::move(current), params.treat_local_as_remote, current_shard_num); + addShard(settings, std::move(current), params.treat_local_as_remote, current_shard_num, /* insert_paths= */ {}, /* weight= */ 1); ++current_shard_num; } @@ -715,6 +715,7 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti ShardInfo info; info.shard_num = ++shard_num; + info.weight = 1; if (address.is_local) info.local_addresses.push_back(address); @@ -740,6 +741,8 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti info.per_replica_pools = {std::move(pool)}; addresses_with_failover.emplace_back(Addresses{address}); + + slot_to_shard.insert(std::end(slot_to_shard), info.weight, shards_info.size()); shards_info.emplace_back(std::move(info)); } }; @@ -769,7 +772,11 @@ Cluster::Cluster(Cluster::SubclusterTag, const Cluster & from, const std::vector { for (size_t index : indices) { - shards_info.emplace_back(from.shards_info.at(index)); + const auto & from_shard = from.shards_info.at(index); + + if (from_shard.weight) + slot_to_shard.insert(std::end(slot_to_shard), from_shard.weight, shards_info.size()); + shards_info.emplace_back(from_shard); if (!from.addresses_with_failover.empty()) addresses_with_failover.emplace_back(from.addresses_with_failover.at(index)); From 006d05c6a7aacc6f1c321822725389778b8c299c Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 28 Jun 2023 18:03:04 +0200 Subject: [PATCH 066/179] Add test for INSERT INTO clusterAllReplicas() (leads to SIGSEGV before) Signed-off-by: Azat Khuzhin --- .../0_stateless/02804_clusterAllReplicas_insert.reference | 1 + .../queries/0_stateless/02804_clusterAllReplicas_insert.sql | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 tests/queries/0_stateless/02804_clusterAllReplicas_insert.reference create mode 100644 tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql diff --git a/tests/queries/0_stateless/02804_clusterAllReplicas_insert.reference b/tests/queries/0_stateless/02804_clusterAllReplicas_insert.reference new file mode 100644 index 000000000000..0cfbf08886fc --- /dev/null +++ b/tests/queries/0_stateless/02804_clusterAllReplicas_insert.reference @@ -0,0 +1 @@ +2 diff --git 
a/tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql b/tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql new file mode 100644 index 000000000000..05bda19eb9e9 --- /dev/null +++ b/tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql @@ -0,0 +1,5 @@ +drop table if exists data; +create table data (key Int) engine=Memory(); +-- NOTE: internal_replication is false, so INSERT will be done only into one shard +insert into function clusterAllReplicas(test_cluster_two_shards, currentDatabase(), data, rand()) values (2); +select * from data order by key; From fdd8a0a3966028a5c72e7ce5e07410f68ce50da5 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky <43110995+evillique@users.noreply.github.com> Date: Thu, 29 Jun 2023 02:35:07 +0200 Subject: [PATCH 067/179] Fix flaky test 00416_pocopatch_progress_in_http_headers --- ...0416_pocopatch_progress_in_http_headers.sh | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh index b2189ab0cc2a..7e954db2c862 100755 --- a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh +++ b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh @@ -4,9 +4,28 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=5&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT max(number) FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]' +RETRIES=5 + +result="" +lines_expected=4 +counter=0 +while [ $counter -lt $RETRIES ] && [ $(echo "$result" | wc -l) != "$lines_expected" ]; do + result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=5&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT max(number) FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]') + let counter=counter+1 + # echo "$result" | wc -l +done +echo "$result" + +result="" +lines_expected=12 +counter=0 +while [ $counter -lt $RETRIES ] && [ $(echo "$result" | wc -l) != "$lines_expected" ]; do + result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&output_format_parallel_formatting=0" -d 'SELECT number FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]') + let counter=counter+1 + # echo "$result" | wc -l +done +echo "$result" -${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&output_format_parallel_formatting=0" -d 'SELECT number FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]' ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&enable_http_compression=1" -H 'Accept-Encoding: gzip' -d 'SELECT number FROM system.numbers LIMIT 10' | gzip -d # 'send_progress_in_http_headers' is false by default @@ -26,7 +45,13 @@ ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}" -H 'Accept-Encoding: gzip' -d 'DROP ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}" -H 'Accept-Encoding: gzip' -d 'CREATE TABLE insert_number_query (record UInt32) Engine = Memory' > /dev/null 2>&1 ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}" -H 'Accept-Encoding: gzip' -d 'CREATE TABLE 
insert_number_query_2 (record UInt32) Engine = Memory' > /dev/null 2>&1 -${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&http_headers_progress_interval_ms=0&send_progress_in_http_headers=1" -d 'INSERT INTO insert_number_query (record) SELECT number FROM system.numbers LIMIT 10' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Summary|^[0-9]' +result="" +counter=0 +while [ $counter -lt $RETRIES ] && [ -z "$result" ]; do + result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&http_headers_progress_interval_ms=0&send_progress_in_http_headers=1" -d 'INSERT INTO insert_number_query (record) SELECT number FROM system.numbers LIMIT 10' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Summary|^[0-9]') + let counter=counter+1 +done +echo "$result" ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}" -H 'Accept-Encoding: gzip' -d 'DROP TABLE insert_number_query' > /dev/null 2>&1 ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}" -H 'Accept-Encoding: gzip' -d 'DROP TABLE insert_number_query_2' > /dev/null 2>&1 From 58581ce5f6bdfe0df9135a95c0df14404af91e2a Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky <43110995+evillique@users.noreply.github.com> Date: Thu, 29 Jun 2023 02:37:09 +0200 Subject: [PATCH 068/179] Update 00416_pocopatch_progress_in_http_headers.sh --- .../0_stateless/00416_pocopatch_progress_in_http_headers.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh index 7e954db2c862..ad7e89a73574 100755 --- a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh +++ b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh @@ -12,7 +12,6 @@ counter=0 while [ $counter -lt $RETRIES ] && [ $(echo "$result" | wc -l) != "$lines_expected" ]; do result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=5&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT max(number) FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]') let counter=counter+1 - # echo "$result" | wc -l done echo "$result" @@ -22,7 +21,6 @@ counter=0 while [ $counter -lt $RETRIES ] && [ $(echo "$result" | wc -l) != "$lines_expected" ]; do result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&output_format_parallel_formatting=0" -d 'SELECT number FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]') let counter=counter+1 - # echo "$result" | wc -l done echo "$result" From 919bf5429478261cd9d0329129191323e77263f2 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 29 Jun 2023 12:38:46 +0200 Subject: [PATCH 069/179] fix race condition --- src/IO/WriteBufferFromS3.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 8714282f7a8f..a72fac138b3c 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -223,8 +223,8 @@ String WriteBufferFromS3::getShortLogDetails() const multipart_upload_details = fmt::format(", upload id {}" , multipart_upload_id); - return fmt::format("Details: bucket {}, key {}, total size {}{}", - bucket, key, total_size, multipart_upload_details); + return fmt::format("Details: bucket {}, key {}{}", + bucket, key, multipart_upload_details); } void WriteBufferFromS3::tryToAbortMultipartUpload() From 49ab480d40f268df1e597dfe14426eb5416a5fd6 Mon Sep 17 
00:00:00 2001 From: Alexey Milovidov Date: Thu, 29 Jun 2023 23:09:58 +0300 Subject: [PATCH 070/179] Update 00416_pocopatch_progress_in_http_headers.sh --- .../0_stateless/00416_pocopatch_progress_in_http_headers.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh index ad7e89a73574..2b0cae3c1d4c 100755 --- a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh +++ b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh @@ -9,7 +9,7 @@ RETRIES=5 result="" lines_expected=4 counter=0 -while [ $counter -lt $RETRIES ] && [ $(echo "$result" | wc -l) != "$lines_expected" ]; do +while [ $counter -lt $RETRIES ] && [ "$(echo "$result" | wc -l)" != "$lines_expected" ]; do result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=5&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT max(number) FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]') let counter=counter+1 done @@ -18,7 +18,7 @@ echo "$result" result="" lines_expected=12 counter=0 -while [ $counter -lt $RETRIES ] && [ $(echo "$result" | wc -l) != "$lines_expected" ]; do +while [ $counter -lt $RETRIES ] && [ "$(echo "$result" | wc -l)" != "$lines_expected" ]; do result=$(${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&output_format_parallel_formatting=0" -d 'SELECT number FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]') let counter=counter+1 done From 3c4491b706e0cbd89086db845eb582e1227f3a74 Mon Sep 17 00:00:00 2001 From: Manas Alekar Date: Thu, 29 Jun 2023 14:31:40 -0700 Subject: [PATCH 071/179] Ignore APPEND and TRUNCATE modifiers if file does not exist. --- src/Client/ClientBase.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 34b3b1e228a0..a8bdc5d0b08e 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -575,9 +575,11 @@ try } auto flags = O_WRONLY | O_EXCL; - if (query_with_output->is_outfile_append) + + auto file_exists = fs::exists(out_file); + if (file_exists && query_with_output->is_outfile_append) flags |= O_APPEND; - else if (query_with_output->is_outfile_truncate) + else if (file_exists && query_with_output->is_outfile_truncate) flags |= O_TRUNC; else flags |= O_CREAT; From 9a35921d005be1e7b34493d34429fb9dbf306ef7 Mon Sep 17 00:00:00 2001 From: Manas Alekar Date: Fri, 30 Jun 2023 13:16:02 -0700 Subject: [PATCH 072/179] Add tests. 
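The new cases cover the APPEND and TRUNCATE modifiers of INTO OUTFILE, whose
behaviour for missing output files was changed in the previous commit. A minimal
sketch of the syntax being exercised (the output path is illustrative):

    SELECT 1, 2, 3 INTO OUTFILE '/tmp/out.tsv' APPEND;
    SELECT 1, 2, 3 INTO OUTFILE '/tmp/out.tsv' TRUNCATE;

APPEND adds rows to an existing file and TRUNCATE overwrites it; when the file
does not exist yet, both modifiers simply create it.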
---
 tests/queries/0_stateless/00415_into_outfile.reference | 4 ++++
 tests/queries/0_stateless/00415_into_outfile.sh | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/tests/queries/0_stateless/00415_into_outfile.reference b/tests/queries/0_stateless/00415_into_outfile.reference
index a609e77a50a8..4576a2d9d609 100644
--- a/tests/queries/0_stateless/00415_into_outfile.reference
+++ b/tests/queries/0_stateless/00415_into_outfile.reference
@@ -1,5 +1,9 @@
 performing test: select
 1 2 3
+performing test: select_with_append
+1 2 3
+performing test: select_with_truncate
+1 2 3
 performing test: union_all
 1 2
 3 4
diff --git a/tests/queries/0_stateless/00415_into_outfile.sh b/tests/queries/0_stateless/00415_into_outfile.sh
index 77dc96a48e68..d360a29fa5a5 100755
--- a/tests/queries/0_stateless/00415_into_outfile.sh
+++ b/tests/queries/0_stateless/00415_into_outfile.sh
@@ -21,6 +21,10 @@ function perform()
 perform "select" "SELECT 1, 2, 3 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_select.out'"
+perform "select_with_append" "SELECT 1, 2, 3 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_select_with_append.out' APPEND"
+
+perform "select_with_truncate" "SELECT 1, 2, 3 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_select_with_truncate.out' TRUNCATE"
+
 perform "union_all" "SELECT 1, 2 UNION ALL SELECT 3, 4 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_union_all.out' FORMAT TSV" | sort --numeric-sort
 perform "bad_union_all" "SELECT 1, 2 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_bad_union_all.out' UNION ALL SELECT 3, 4"

From fd545deba071ffc9c6bde43683ecfbec533e4498 Mon Sep 17 00:00:00 2001
From: velavokr
Date: Sun, 2 Jul 2023 17:51:43 +0300
Subject: [PATCH 073/179] added a warning on autocalculated parallelism limits underutilizing CPU cores

---
 cmake/limit_jobs.cmake | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake
index a8f105b89874..100ce921b19f 100644
--- a/cmake/limit_jobs.cmake
+++ b/cmake/limit_jobs.cmake
@@ -18,6 +18,9 @@ if (NOT PARALLEL_COMPILE_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_COMPILER_MEMORY)
     if (NOT PARALLEL_COMPILE_JOBS)
         set (PARALLEL_COMPILE_JOBS 1)
     endif ()
+    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        set (PARALLEL_COMPILE_JOBS_LESS TRUE)
+    endif()
 endif ()
 if (PARALLEL_COMPILE_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES))
@@ -33,6 +36,9 @@ if (NOT PARALLEL_LINK_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_LINKER_MEMORY)
     if (NOT PARALLEL_LINK_JOBS)
         set (PARALLEL_LINK_JOBS 1)
     endif ()
+    if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+        set (PARALLEL_LINK_JOBS_LESS TRUE)
+    endif()
 endif ()
 # ThinLTO provides its own parallel linking
@@ -56,4 +62,10 @@ if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
     message(STATUS
         "${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory. Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
+    if (PARALLEL_COMPILE_JOBS_LESS)
+        message(WARNING "The autocalculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) will underutilize CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
+    endif()
+    if (PARALLEL_LINK_JOBS_LESS)
+        message(WARNING "The autocalculated link jobs limit (${PARALLEL_LINK_JOBS}) will underutilize CPU cores (${NUMBER_OF_LOGICAL_CORES}).
Set PARALLEL_LINK_JOBS to override.") + endif() endif () From 711d8db6443c4a87dcb3b7a28df3265079717e54 Mon Sep 17 00:00:00 2001 From: velavokr Date: Sun, 2 Jul 2023 17:59:48 +0300 Subject: [PATCH 074/179] better wording --- cmake/limit_jobs.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake index 100ce921b19f..3a33b3b99894 100644 --- a/cmake/limit_jobs.cmake +++ b/cmake/limit_jobs.cmake @@ -63,9 +63,9 @@ if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS) "${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory. Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)") if (PARALLEL_COMPILE_JOBS_LESS) - message(WARNING "The autocalculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) will underutilize CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.") + message(WARNING "The autocalculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.") endif() if (PARALLEL_LINK_JOBS_LESS) - message(WARNING "The autocalculated link jobs limit (${PARALLEL_LINK_JOBS}) will underutilize CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.") + message(WARNING "The autocalculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.") endif() endif () From ca6930eb110903709fc4c2e1cbec19a95e55ee18 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Tue, 4 Jul 2023 17:38:53 +0200 Subject: [PATCH 075/179] Revert "Revert "Merge pull request #50951 from ZhiguoZh/20230607-toyear-fix"" --- src/Functions/DateTimeTransforms.h | 72 +++++++ .../FunctionDateOrDateTimeToSomething.h | 13 ++ src/Functions/IFunction.h | 29 ++- src/Functions/IFunctionAdaptors.h | 7 + ...OrDateTimeConverterWithPreimageVisitor.cpp | 199 ++++++++++++++++++ ...teOrDateTimeConverterWithPreimageVisitor.h | 37 ++++ src/Interpreters/TreeOptimizer.cpp | 19 ++ ...783_date_predicate_optimizations.reference | 52 +++++ .../02783_date_predicate_optimizations.sql | 76 +++++++ ...dicate_optimizations_ast_rewrite.reference | 87 ++++++++ ...te_predicate_optimizations_ast_rewrite.sql | 47 +++++ 11 files changed, 632 insertions(+), 6 deletions(-) create mode 100644 src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp create mode 100644 src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.h create mode 100644 tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.reference create mode 100644 tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.sql diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 019e0c42cde7..84c71c89b111 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -322,6 +322,7 @@ struct ToTimeImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToDateImpl; }; @@ -393,6 +394,7 @@ struct ToStartOfSecondImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -440,6 +442,7 @@ struct ToStartOfMillisecondImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = 
ZeroTransform; }; @@ -483,6 +486,7 @@ struct ToStartOfMicrosecondImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -520,6 +524,7 @@ struct ToStartOfNanosecondImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -718,6 +723,28 @@ struct ToYearImpl return time_zone.toYear(DayNum(d)); } + static inline constexpr bool hasPreimage() { return true; } + + static inline RangeOrNull getPreimage(const IDataType & type, const Field & point) + { + if (point.getType() != Field::Types::UInt64) return std::nullopt; + + auto year = point.get(); + if (year < DATE_LUT_MIN_YEAR || year >= DATE_LUT_MAX_YEAR) return std::nullopt; + + const DateLUTImpl & date_lut = DateLUT::instance(); + + auto start_time = date_lut.makeDateTime(year, 1, 1, 0, 0, 0); + auto end_time = date_lut.addYears(start_time, 1); + + if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) + return {std::make_pair(Field(start_time), Field(end_time))}; + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument of function {}. Should be Date, Date32, DateTime or DateTime64", + type.getName(), name); + } + using FactorTransform = ZeroTransform; }; @@ -791,6 +818,7 @@ struct ToQuarterImpl { return time_zone.toQuarter(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfYearImpl; }; @@ -815,6 +843,7 @@ struct ToMonthImpl { return time_zone.toMonth(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfYearImpl; }; @@ -840,6 +869,7 @@ struct ToDayOfMonthImpl return time_zone.toDayOfMonth(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfMonthImpl; }; @@ -887,6 +917,7 @@ struct ToDayOfYearImpl { return time_zone.toDayOfYear(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfYearImpl; }; @@ -911,6 +942,7 @@ struct ToHourImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToDateImpl; }; @@ -939,6 +971,7 @@ struct TimezoneOffsetImpl throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToTimeImpl; }; @@ -962,6 +995,7 @@ struct ToMinuteImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfHourImpl; }; @@ -986,6 +1020,7 @@ struct ToSecondImpl { throwDateTimeIsNotSupported(name); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfMinuteImpl; }; @@ -1010,6 +1045,7 @@ struct ToISOYearImpl { return time_zone.toISOYear(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1066,6 +1102,7 @@ struct ToISOWeekImpl { return time_zone.toISOWeek(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToISOYearImpl; }; @@ -1108,6 +1145,7 @@ struct ToRelativeYearNumImpl { return time_zone.toYear(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1139,6 +1177,7 @@ struct ToRelativeQuarterNumImpl { return 
time_zone.toRelativeQuarterNum(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1170,6 +1209,7 @@ struct ToRelativeMonthNumImpl { return time_zone.toRelativeMonthNum(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1201,6 +1241,7 @@ struct ToRelativeWeekNumImpl { return time_zone.toRelativeWeekNum(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1232,6 +1273,7 @@ struct ToRelativeDayNumImpl { return static_cast(d); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1269,6 +1311,7 @@ struct ToRelativeHourNumImpl else return static_cast(time_zone.toRelativeHourNum(DayNum(d))); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1300,6 +1343,7 @@ struct ToRelativeMinuteNumImpl { return static_cast(time_zone.toRelativeMinuteNum(DayNum(d))); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1328,6 +1372,7 @@ struct ToRelativeSecondNumImpl { return static_cast(time_zone.fromDayNum(DayNum(d))); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1352,6 +1397,31 @@ struct ToYYYYMMImpl { return time_zone.toNumYYYYMM(DayNum(d)); } + static inline constexpr bool hasPreimage() { return true; } + + static inline RangeOrNull getPreimage(const IDataType & type, const Field & point) + { + if (point.getType() != Field::Types::UInt64) return std::nullopt; + + auto year_month = point.get(); + auto year = year_month / 100; + auto month = year_month % 100; + + if (year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || (year == DATE_LUT_MAX_YEAR && month == 12)) + return std::nullopt; + + const DateLUTImpl & date_lut = DateLUT::instance(); + + auto start_time = date_lut.makeDateTime(year, month, 1, 0, 0, 0); + auto end_time = date_lut.addMonths(start_time, 1); + + if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) + return {std::make_pair(Field(start_time), Field(end_time))}; + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument of function {}. 
Should be Date, Date32, DateTime or DateTime64", + type.getName(), name); + } using FactorTransform = ZeroTransform; }; @@ -1376,6 +1446,7 @@ struct ToYYYYMMDDImpl { return time_zone.toNumYYYYMMDD(DayNum(d)); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1400,6 +1471,7 @@ struct ToYYYYMMDDhhmmssImpl { return time_zone.toNumYYYYMMDDhhmmss(time_zone.toDate(DayNum(d))); } + static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; diff --git a/src/Functions/FunctionDateOrDateTimeToSomething.h b/src/Functions/FunctionDateOrDateTimeToSomething.h index 82818cc3d2b9..d98b788c7d74 100644 --- a/src/Functions/FunctionDateOrDateTimeToSomething.h +++ b/src/Functions/FunctionDateOrDateTimeToSomething.h @@ -7,6 +7,7 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NOT_IMPLEMENTED; } /// See DateTimeTransforms.h @@ -83,6 +84,18 @@ class FunctionDateOrDateTimeToSomething : public IFunctionDateOrDateTimegetName(), this->getName()); } + bool hasInformationAboutPreimage() const override { return Transform::hasPreimage(); } + + RangeOrNull getPreimage(const IDataType & type, const Field & point) const override + { + if constexpr (Transform::hasPreimage()) + return Transform::getPreimage(type, point); + else + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Function {} has no information about its preimage", + Transform::name); + } + }; } diff --git a/src/Functions/IFunction.h b/src/Functions/IFunction.h index c5b9a78015da..433cb61d04e9 100644 --- a/src/Functions/IFunction.h +++ b/src/Functions/IFunction.h @@ -2,6 +2,8 @@ #include #include +#include +#include #include #include #include @@ -11,11 +13,6 @@ #include -#if USE_EMBEDDED_COMPILER -# include -#endif - - /// This file contains user interface for functions. namespace llvm @@ -35,7 +32,8 @@ namespace ErrorCodes extern const int ILLEGAL_TYPE_OF_ARGUMENT; } -class Field; +/// A left-closed and right-open interval representing the preimage of a function. +using RangeOrNull = std::optional>; /// The simplest executable object. /// Motivation: @@ -233,6 +231,12 @@ class IFunctionBase : public IResolvedFunction */ virtual bool hasInformationAboutMonotonicity() const { return false; } + /** Lets you know if the function has its definition of preimage. + * This is used to work with predicate optimizations, where the comparison between + * f(x) and a constant c could be converted to the comparison between x and f's preimage [b, e). + */ + virtual bool hasInformationAboutPreimage() const { return false; } + struct ShortCircuitSettings { /// Should we enable lazy execution for the first argument of short-circuit function? @@ -286,6 +290,14 @@ class IFunctionBase : public IResolvedFunction throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its monotonicity", getName()); } + /** Get the preimage of a function in the form of a left-closed and right-open interval. Call only if hasInformationAboutPreimage. + * std::nullopt might be returned if the point (a single value) is invalid for this function. 
+ */ + virtual RangeOrNull getPreimage(const IDataType & /*type*/, const Field & /*point*/) const + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its preimage", getName()); + } + }; using FunctionBasePtr = std::shared_ptr; @@ -475,12 +487,17 @@ class IFunction virtual bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const = 0; virtual bool hasInformationAboutMonotonicity() const { return false; } + virtual bool hasInformationAboutPreimage() const { return false; } using Monotonicity = IFunctionBase::Monotonicity; virtual Monotonicity getMonotonicityForRange(const IDataType & /*type*/, const Field & /*left*/, const Field & /*right*/) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its monotonicity", getName()); } + virtual RangeOrNull getPreimage(const IDataType & /*type*/, const Field & /*point*/) const + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its preimage", getName()); + } /// For non-variadic functions, return number of arguments; otherwise return zero (that should be ignored). virtual size_t getNumberOfArguments() const = 0; diff --git a/src/Functions/IFunctionAdaptors.h b/src/Functions/IFunctionAdaptors.h index 23725b1a8b13..123fdbc2f50f 100644 --- a/src/Functions/IFunctionAdaptors.h +++ b/src/Functions/IFunctionAdaptors.h @@ -90,10 +90,17 @@ class FunctionToFunctionBaseAdaptor final : public IFunctionBase bool hasInformationAboutMonotonicity() const override { return function->hasInformationAboutMonotonicity(); } + bool hasInformationAboutPreimage() const override { return function->hasInformationAboutPreimage(); } + Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override { return function->getMonotonicityForRange(type, left, right); } + + RangeOrNull getPreimage(const IDataType & type, const Field & point) const override + { + return function->getPreimage(type, point); + } private: std::shared_ptr function; DataTypes arguments; diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp new file mode 100644 index 000000000000..a377bb4bba68 --- /dev/null +++ b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp @@ -0,0 +1,199 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +/** Given a monotonic non-decreasing function f(x), which satisfies f(x) = c for any value x within [b, e). + * We could convert it into its equivalent form, x >= b AND x < e, which is free from the invocation of the function. + * And we could apply the similar transformation to other comparisons. The suggested transformations list: + * + * f(x) == c -> x >= b AND x < e + * f(x) != c -> x < b OR x >= e + * f(x) > c -> x >= e + * f(x) >= c -> x >= b + * f(x) < c -> x < b + * f(x) <= c -> x < e + * + * This function generates a new AST with the transformed relation. 
+ */ +ASTPtr generateOptimizedDateFilterAST(const String & comparator, const NameAndTypePair & column, const std::pair& range) +{ + const DateLUTImpl & date_lut = DateLUT::instance(); + + const String & column_name = column.name; + String start_date_or_date_time; + String end_date_or_date_time; + + if (isDateOrDate32(column.type.get())) + { + start_date_or_date_time = date_lut.dateToString(range.first.get()); + end_date_or_date_time = date_lut.dateToString(range.second.get()); + } + else if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) + { + start_date_or_date_time = date_lut.timeToString(range.first.get()); + end_date_or_date_time = date_lut.timeToString(range.second.get()); + } + else [[unlikely]] return {}; + + if (comparator == "equals") + { + return makeASTFunction("and", + makeASTFunction("greaterOrEquals", + std::make_shared(column_name), + std::make_shared(start_date_or_date_time) + ), + makeASTFunction("less", + std::make_shared(column_name), + std::make_shared(end_date_or_date_time) + ) + ); + } + else if (comparator == "notEquals") + { + return makeASTFunction("or", + makeASTFunction("less", + std::make_shared(column_name), + std::make_shared(start_date_or_date_time) + ), + makeASTFunction("greaterOrEquals", + std::make_shared(column_name), + std::make_shared(end_date_or_date_time) + ) + ); + } + else if (comparator == "greater") + { + return makeASTFunction("greaterOrEquals", + std::make_shared(column_name), + std::make_shared(end_date_or_date_time) + ); + } + else if (comparator == "lessOrEquals") + { + return makeASTFunction("less", + std::make_shared(column_name), + std::make_shared(end_date_or_date_time) + ); + } + else if (comparator == "less" || comparator == "greaterOrEquals") + { + return makeASTFunction(comparator, + std::make_shared(column_name), + std::make_shared(start_date_or_date_time) + ); + } + else [[unlikely]] + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected equals, notEquals, less, lessOrEquals, greater, greaterOrEquals. Actual {}", + comparator); + } +} + +void OptimizeDateOrDateTimeConverterWithPreimageMatcher::visit(const ASTFunction & function, ASTPtr & ast, const Data & data) +{ + const static std::unordered_map swap_relations = { + {"equals", "equals"}, + {"notEquals", "notEquals"}, + {"less", "greater"}, + {"greater", "less"}, + {"lessOrEquals", "greaterOrEquals"}, + {"greaterOrEquals", "lessOrEquals"}, + }; + + if (!swap_relations.contains(function.name)) return; + + if (!function.arguments || function.arguments->children.size() != 2) return; + + size_t func_id = function.arguments->children.size(); + + for (size_t i = 0; i < function.arguments->children.size(); i++) + { + if (const auto * func = function.arguments->children[i]->as()) + { + func_id = i; + } + } + + if (func_id == function.arguments->children.size()) return; + + size_t literal_id = 1 - func_id; + const auto * literal = function.arguments->children[literal_id]->as(); + + if (!literal || literal->value.getType() != Field::Types::UInt64) return; + + String comparator = literal_id > func_id ? function.name : swap_relations.at(function.name); + + const auto * ast_func = function.arguments->children[func_id]->as(); + /// Currently we only handle single-argument functions. 
+ if (!ast_func || !ast_func->arguments || ast_func->arguments->children.size() != 1) return; + + const auto * column_id = ast_func->arguments->children.at(0)->as(); + if (!column_id) return; + + auto pos = IdentifierSemantic::getMembership(*column_id); + if (!pos) + pos = IdentifierSemantic::chooseTableColumnMatch(*column_id, data.tables, true); + if (!pos) + return; + + if (*pos >= data.tables.size()) + return; + + auto data_type_and_name = data.tables[*pos].columns.tryGetByName(column_id->shortName()); + if (!data_type_and_name) return; + + const auto & converter = FunctionFactory::instance().tryGet(ast_func->name, data.context); + if (!converter) return; + + ColumnsWithTypeAndName args; + args.emplace_back(data_type_and_name->type, "tmp"); + auto converter_base = converter->build(args); + if (!converter_base || !converter_base->hasInformationAboutPreimage()) return; + + auto preimage_range = converter_base->getPreimage(*(data_type_and_name->type), literal->value); + if (!preimage_range) return; + + const auto new_ast = generateOptimizedDateFilterAST(comparator, *data_type_and_name, *preimage_range); + if (!new_ast) return; + + ast = new_ast; +} + +bool OptimizeDateOrDateTimeConverterWithPreimageMatcher::needChildVisit(ASTPtr & ast, ASTPtr & /*child*/) +{ + const static std::unordered_set relations = { + "equals", + "notEquals", + "less", + "greater", + "lessOrEquals", + "greaterOrEquals", + }; + + if (const auto * ast_function = ast->as()) + { + return !relations.contains(ast_function->name); + } + + return true; +} + +} diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.h b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.h new file mode 100644 index 000000000000..778fa462364a --- /dev/null +++ b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class ASTFunction; + +/** Replace predicate having Date/DateTime converters with their preimages to improve performance. + * Given a Date column c, toYear(c) = 2023 -> c >= '2023-01-01' AND c < '2024-01-01' + * Or if c is a DateTime column, toYear(c) = 2023 -> c >= '2023-01-01 00:00:00' AND c < '2024-01-01 00:00:00'. + * The similar optimization also applies to other converters. + */ +class OptimizeDateOrDateTimeConverterWithPreimageMatcher +{ +public: + struct Data + { + const TablesWithColumns & tables; + ContextPtr context; + }; + + static void visit(ASTPtr & ast, Data & data) + { + if (const auto * ast_function = ast->as()) + visit(*ast_function, ast, data); + } + + static void visit(const ASTFunction & function, ASTPtr & ast, const Data & data); + + static bool needChildVisit(ASTPtr & ast, ASTPtr & child); +}; + +using OptimizeDateOrDateTimeConverterWithPreimageVisitor = InDepthNodeVisitor; +} diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp index c38b3c790265..fd4d2c9d8461 100644 --- a/src/Interpreters/TreeOptimizer.cpp +++ b/src/Interpreters/TreeOptimizer.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -677,6 +678,21 @@ void optimizeInjectiveFunctionsInsideUniq(ASTPtr & query, ContextPtr context) RemoveInjectiveFunctionsVisitor(data).visit(query); } +void optimizeDateFilters(ASTSelectQuery * select_query, const std::vector & tables_with_columns, ContextPtr context) +{ + /// Predicates in HAVING clause has been moved to WHERE clause. 
+ if (select_query->where()) + { + OptimizeDateOrDateTimeConverterWithPreimageVisitor::Data data{tables_with_columns, context}; + OptimizeDateOrDateTimeConverterWithPreimageVisitor(data).visit(select_query->refWhere()); + } + if (select_query->prewhere()) + { + OptimizeDateOrDateTimeConverterWithPreimageVisitor::Data data{tables_with_columns, context}; + OptimizeDateOrDateTimeConverterWithPreimageVisitor(data).visit(select_query->refPrewhere()); + } +} + void transformIfStringsIntoEnum(ASTPtr & query) { std::unordered_set function_names = {"if", "transform"}; @@ -780,6 +796,9 @@ void TreeOptimizer::apply(ASTPtr & query, TreeRewriterResult & result, tables_with_columns, result.storage_snapshot->metadata, result.storage); } + /// Rewrite date filters to avoid the calls of converters such as toYear, toYYYYMM, etc. + optimizeDateFilters(select_query, tables_with_columns, context); + /// GROUP BY injective function elimination. optimizeGroupBy(select_query, context); diff --git a/tests/queries/0_stateless/02783_date_predicate_optimizations.reference b/tests/queries/0_stateless/02783_date_predicate_optimizations.reference index cd689b93034f..872a5dd1d7d7 100644 --- a/tests/queries/0_stateless/02783_date_predicate_optimizations.reference +++ b/tests/queries/0_stateless/02783_date_predicate_optimizations.reference @@ -1,2 +1,54 @@ 2021-12-31 23:00:00 0 2021-12-31 23:00:00 0 +Date +2 +3 +2 +4 +1 +3 +3 +2 +1 +4 +1 +4 +DateTime +2 +3 +2 +4 +1 +3 +3 +2 +1 +4 +1 +4 +Date32 +2 +3 +2 +4 +1 +3 +3 +2 +1 +4 +1 +4 +DateTime64 +2 +3 +2 +4 +1 +3 +3 +2 +1 +4 +1 +4 diff --git a/tests/queries/0_stateless/02783_date_predicate_optimizations.sql b/tests/queries/0_stateless/02783_date_predicate_optimizations.sql index abb13f1005ee..0a2fa6cc93b5 100644 --- a/tests/queries/0_stateless/02783_date_predicate_optimizations.sql +++ b/tests/queries/0_stateless/02783_date_predicate_optimizations.sql @@ -11,3 +11,79 @@ INSERT INTO source values ('2021-12-31 23:00:00', 0); SELECT * FROM source WHERE toYYYYMM(ts) = 202112; SELECT * FROM source WHERE toYear(ts) = 2021; + +DROP TABLE IF EXISTS source; +CREATE TABLE source +( + `dt` Date, + `ts` DateTime, + `dt_32` Date32, + `ts_64` DateTime64(3), + `n` Int32 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(ts) +ORDER BY tuple(); + +INSERT INTO source values ('2022-12-31', '2022-12-31 23:59:59', '2022-12-31', '2022-12-31 23:59:59.123', 0); +INSERT INTO source values ('2023-01-01', '2023-01-01 00:00:00', '2023-01-01', '2023-01-01 00:00:00.000', 1); +INSERT INTO source values ('2023-12-01', '2023-12-01 00:00:00', '2023-12-01', '2023-12-01 00:00:00.000', 2); +INSERT INTO source values ('2023-12-31', '2023-12-31 23:59:59', '2023-12-31', '2023-12-31 23:59:59.123', 3); +INSERT INTO source values ('2024-01-01', '2024-01-01 00:00:00', '2024-01-01', '2024-01-01 00:00:00.000', 4); + +SELECT 'Date'; +SELECT count(*) FROM source WHERE toYYYYMM(dt) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) >= 202312; +SELECT count(*) FROM source WHERE toYear(dt) = 2023; +SELECT count(*) FROM source WHERE toYear(dt) <> 2023; +SELECT count(*) FROM source WHERE toYear(dt) < 2023; +SELECT count(*) FROM source WHERE toYear(dt) <= 2023; +SELECT count(*) FROM source WHERE toYear(dt) > 2023; +SELECT count(*) FROM source WHERE toYear(dt) >= 2023; + +SELECT 'DateTime'; +SELECT 
count(*) FROM source WHERE toYYYYMM(ts) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) >= 202312; +SELECT count(*) FROM source WHERE toYear(ts) = 2023; +SELECT count(*) FROM source WHERE toYear(ts) <> 2023; +SELECT count(*) FROM source WHERE toYear(ts) < 2023; +SELECT count(*) FROM source WHERE toYear(ts) <= 2023; +SELECT count(*) FROM source WHERE toYear(ts) > 2023; +SELECT count(*) FROM source WHERE toYear(ts) >= 2023; + +SELECT 'Date32'; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) >= 202312; +SELECT count(*) FROM source WHERE toYear(dt_32) = 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) <> 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) < 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) <= 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) > 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) >= 2023; + +SELECT 'DateTime64'; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) >= 202312; +SELECT count(*) FROM source WHERE toYear(ts_64) = 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) <> 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) < 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) <= 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) > 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) >= 2023; +DROP TABLE source; diff --git a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.reference b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.reference new file mode 100644 index 000000000000..9235e7e106a2 --- /dev/null +++ b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.reference @@ -0,0 +1,87 @@ +SELECT value1 +FROM date_t +WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE ((date1 < \'1993-01-01\') OR (date1 >= \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 < \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 >= \'1994-01-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 < \'1994-01-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 >= \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1998-01-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) OR ((date1 >= \'1994-01-01\') AND (date1 < \'1995-01-01\'))) AND ((id >= 1) AND (id <= 3)) +SELECT + value1, + toYear(date1) AS year1 +FROM date_t +WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND 
((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 < \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +PREWHERE (date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\') +WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE ((id >= 1) AND (id <= 3)) AND ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) +SELECT value1 +FROM date_t +WHERE (toYYYYMM(date1) = 199300) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (toYYYYMM(date1) = 199313) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE ((date1 >= \'1993-12-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE ((date1 >= \'1992-03-01\') AND (date1 < \'1992-04-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE ((date1 < \'1992-03-01\') OR (date1 >= \'1992-04-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 < \'1992-03-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 >= \'1992-04-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 < \'1992-04-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE (date1 >= \'1992-03-01\') AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date_t +WHERE ((date1 >= \'1992-03-01\') OR ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\'))) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM datetime_t +WHERE ((date1 >= \'1993-01-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM datetime_t +WHERE ((date1 >= \'1993-12-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date32_t +WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM date32_t +WHERE ((date1 >= \'1993-12-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM datetime64_t +WHERE ((date1 >= \'1993-01-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) +SELECT value1 +FROM datetime64_t +WHERE ((date1 >= \'1993-12-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) diff --git a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.sql b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.sql new file mode 100644 index 000000000000..266be59b0a3e --- /dev/null +++ b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.sql @@ -0,0 +1,47 @@ +DROP TABLE IF EXISTS date_t; +CREATE TABLE date_t (id UInt32, value1 String, date1 Date) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <> 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) < 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) > 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <= 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) >= 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) BETWEEN 1993 AND 1997 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYear(date1) = 1993 OR toYear(date1) = 1994) AND 
id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1, toYear(date1) as year1 FROM date_t WHERE year1 = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE 1993 > toYear(date1) AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t PREWHERE toYear(date1) = 1993 WHERE id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE id BETWEEN 1 AND 3 HAVING toYear(date1) = 1993; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199300 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199313 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199203 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) <> 199203 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) < 199203 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) > 199203 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) <= 199203 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) >= 199203 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYYYYMM(date1) >= 199203 OR toYear(date1) = 1993) AND id BETWEEN 1 AND 3; +DROP TABLE date_t; + +DROP TABLE IF EXISTS datetime_t; +CREATE TABLE datetime_t (id UInt32, value1 String, date1 Datetime) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; +DROP TABLE datetime_t; + +DROP TABLE IF EXISTS date32_t; +CREATE TABLE date32_t (id UInt32, value1 String, date1 Date32) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; +DROP TABLE date32_t; + +DROP TABLE IF EXISTS datetime64_t; +CREATE TABLE datetime64_t (id UInt32, value1 String, date1 Datetime64) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; +DROP TABLE datetime64_t; From d987b94ed48594541bf91bb42fb4f5a8ced52e1f Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 4 Jul 2023 20:51:15 +0200 Subject: [PATCH 076/179] fix the way how broken parts are detached --- src/Storages/MergeTree/IMergeTreeDataPart.h | 8 +- src/Storages/MergeTree/MergeTreeData.cpp | 23 +- src/Storages/MergeTree/MergeTreeData.h | 10 +- .../ReplicatedMergeTreePartCheckThread.cpp | 356 ++++++++++-------- .../ReplicatedMergeTreePartCheckThread.h | 44 ++- src/Storages/StorageReplicatedMergeTree.cpp | 68 +++- 6 files changed, 294 insertions(+), 215 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index fd73d8025794..1fdcbd7309c3 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -242,9 +242,11 @@ class IMergeTreeDataPart : public std::enable_shared_from_this is_frozen {false}; - /// Indicated that the part was marked Outdated because it's broken, not because it's 
actually outdated - /// See outdateBrokenPartAndCloneToDetached(...) - mutable bool outdated_because_broken = false; + /// Indicates that the part was marked Outdated by PartCheckThread because the part was not committed to ZooKeeper + mutable bool is_unexpected_local_part = false; + + /// Indicates that the part was detached and marked Outdated because it's broken + mutable std::atomic_bool was_removed_as_broken = false; /// Flag for keep S3 data when zero-copy replication over S3 turned on. mutable bool force_keep_shared_data = false; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index e9c3a7f66aed..e37d42736290 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -4023,22 +4023,15 @@ void MergeTreeData::restoreAndActivatePart(const DataPartPtr & part, DataPartsLo } -void MergeTreeData::outdateBrokenPartAndCloneToDetached(const DataPartPtr & part_to_detach, const String & prefix) +void MergeTreeData::outdateUnexpectedPartAndCloneToDetached(const DataPartPtr & part_to_detach) { - auto metadata_snapshot = getInMemoryMetadataPtr(); - if (prefix.empty()) - LOG_INFO(log, "Cloning part {} to {} and making it obsolete.", part_to_detach->getDataPartStorage().getPartDirectory(), part_to_detach->name); - else - LOG_INFO(log, "Cloning part {} to {}_{} and making it obsolete.", part_to_detach->getDataPartStorage().getPartDirectory(), prefix, part_to_detach->name); - - part_to_detach->makeCloneInDetached(prefix, metadata_snapshot); + LOG_INFO(log, "Cloning part {} to unexpected_{} and making it obsolete.", part_to_detach->getDataPartStorage().getPartDirectory(), part_to_detach->name); + part_to_detach->makeCloneInDetached("unexpected", getInMemoryMetadataPtr()); DataPartsLock lock = lockParts(); + part_to_detach->is_unexpected_local_part = true; if (part_to_detach->getState() == DataPartState::Active) - { - part_to_detach->outdated_because_broken = true; removePartsFromWorkingSet(NO_TRANSACTION_RAW, {part_to_detach}, true, &lock); - } } void MergeTreeData::forcefullyMovePartToDetachedAndRemoveFromMemory(const MergeTreeData::DataPartPtr & part_to_detach, const String & prefix, bool restore_covered) @@ -4677,24 +4670,24 @@ MergeTreeData::DataPartsVector MergeTreeData::getVisibleDataPartsVectorInPartiti return res; } -MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const MergeTreePartInfo & part_info, const MergeTreeData::DataPartStates & valid_states) +MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const MergeTreePartInfo & part_info, const MergeTreeData::DataPartStates & valid_states) const { auto lock = lockParts(); return getPartIfExistsUnlocked(part_info, valid_states, lock); } -MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_name, const MergeTreeData::DataPartStates & valid_states) +MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_name, const MergeTreeData::DataPartStates & valid_states) const { auto lock = lockParts(); return getPartIfExistsUnlocked(part_name, valid_states, lock); } -MergeTreeData::DataPartPtr MergeTreeData::getPartIfExistsUnlocked(const String & part_name, const DataPartStates & valid_states, DataPartsLock & acquired_lock) +MergeTreeData::DataPartPtr MergeTreeData::getPartIfExistsUnlocked(const String & part_name, const DataPartStates & valid_states, DataPartsLock & acquired_lock) const { return getPartIfExistsUnlocked(MergeTreePartInfo::fromPartName(part_name, format_version), valid_states, 
acquired_lock); } -MergeTreeData::DataPartPtr MergeTreeData::getPartIfExistsUnlocked(const MergeTreePartInfo & part_info, const DataPartStates & valid_states, DataPartsLock & /* acquired_lock */) +MergeTreeData::DataPartPtr MergeTreeData::getPartIfExistsUnlocked(const MergeTreePartInfo & part_info, const DataPartStates & valid_states, DataPartsLock & /* acquired_lock */) const { auto it = data_parts_by_info.find(part_info); if (it == data_parts_by_info.end()) diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index b27392b355bf..d5991aaea71c 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -521,10 +521,10 @@ class MergeTreeData : public IStorage, public WithMutableContext DataPartsVector getDataPartsVectorInPartitionForInternalUsage(const DataPartStates & affordable_states, const String & partition_id, DataPartsLock * acquired_lock = nullptr) const; /// Returns the part with the given name and state or nullptr if no such part. - DataPartPtr getPartIfExistsUnlocked(const String & part_name, const DataPartStates & valid_states, DataPartsLock & acquired_lock); - DataPartPtr getPartIfExistsUnlocked(const MergeTreePartInfo & part_info, const DataPartStates & valid_states, DataPartsLock & acquired_lock); - DataPartPtr getPartIfExists(const String & part_name, const DataPartStates & valid_states); - DataPartPtr getPartIfExists(const MergeTreePartInfo & part_info, const DataPartStates & valid_states); + DataPartPtr getPartIfExistsUnlocked(const String & part_name, const DataPartStates & valid_states, DataPartsLock & acquired_lock) const; + DataPartPtr getPartIfExistsUnlocked(const MergeTreePartInfo & part_info, const DataPartStates & valid_states, DataPartsLock & acquired_lock) const; + DataPartPtr getPartIfExists(const String & part_name, const DataPartStates & valid_states) const; + DataPartPtr getPartIfExists(const MergeTreePartInfo & part_info, const DataPartStates & valid_states) const; /// Total size of active parts in bytes. size_t getTotalActiveSizeInBytes() const; @@ -654,7 +654,7 @@ class MergeTreeData : public IStorage, public WithMutableContext virtual void forcefullyRemoveBrokenOutdatedPartFromZooKeeperBeforeDetaching(const String & /*part_name*/) {} /// Outdate broken part, set remove time to zero (remove as fast as possible) and make clone in detached directory. - void outdateBrokenPartAndCloneToDetached(const DataPartPtr & part, const String & prefix); + void outdateUnexpectedPartAndCloneToDetached(const DataPartPtr & part); /// If the part is Obsolete and not used by anybody else, immediately delete it from filesystem and remove from memory. 
void tryRemovePartImmediately(DataPartPtr && part); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index c495fdaf5e2b..d6f8dbac883d 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -131,7 +131,7 @@ size_t ReplicatedMergeTreePartCheckThread::size() const } -ReplicatedMergeTreePartCheckThread::MissingPartSearchResult ReplicatedMergeTreePartCheckThread::searchForMissingPartOnOtherReplicas(const String & part_name) +bool ReplicatedMergeTreePartCheckThread::searchForMissingPartOnOtherReplicas(const String & part_name) const { auto zookeeper = storage.getZooKeeper(); @@ -198,13 +198,13 @@ ReplicatedMergeTreePartCheckThread::MissingPartSearchResult ReplicatedMergeTreeP continue; LOG_INFO(log, "Found the missing part {} at {} on {}", part_name, part_on_replica, replica); - return MissingPartSearchResult::FoundAndNeedFetch; + return true; } if (part_on_replica_info.contains(part_info)) { LOG_INFO(log, "Found part {} on {} that covers the missing part {}", part_on_replica, replica, part_name); - return MissingPartSearchResult::FoundAndDontNeedFetch; + return true; } if (part_info.contains(part_on_replica_info)) @@ -227,11 +227,10 @@ ReplicatedMergeTreePartCheckThread::MissingPartSearchResult ReplicatedMergeTreeP if (found_part_with_the_same_min_block && found_part_with_the_same_max_block) { - /// FIXME It may never appear LOG_INFO(log, "Found parts with the same min block and with the same max block as the missing part {} on replica {}. " "Hoping that it will eventually appear as a result of a merge. Parts: {}", part_name, replica, fmt::join(parts_found, ", ")); - return MissingPartSearchResult::FoundAndDontNeedFetch; + return true; } } } @@ -247,70 +246,9 @@ ReplicatedMergeTreePartCheckThread::MissingPartSearchResult ReplicatedMergeTreeP not_found_msg = "smaller parts with either the same min block or the same max block."; LOG_ERROR(log, "No replica has part covering {} and a merge is impossible: we didn't find {}", part_name, not_found_msg); - return MissingPartSearchResult::LostForever; + return false; } -void ReplicatedMergeTreePartCheckThread::searchForMissingPartAndFetchIfPossible(const String & part_name, bool exists_in_zookeeper) -{ - auto zookeeper = storage.getZooKeeper(); - auto missing_part_search_result = searchForMissingPartOnOtherReplicas(part_name); - - /// If the part is in ZooKeeper, remove it from there and add the task to download it to the queue. - if (exists_in_zookeeper) - { - if (missing_part_search_result == MissingPartSearchResult::FoundAndNeedFetch) - { - LOG_WARNING(log, "Part {} exists in ZooKeeper but not locally and found on other replica. Removing from ZooKeeper and queueing a fetch.", part_name); - } - else - { - LOG_WARNING(log, "Part {} exists in ZooKeeper but not locally and not found on other replica. Removing it from ZooKeeper.", part_name); - } - - /// We cannot simply remove part from ZooKeeper, because it may be removed from virtual_part, - /// so we have to create some entry in the queue. Maybe we will execute it (by fetching part or covering part from somewhere), - /// maybe will simply replace with empty part. 
- storage.removePartAndEnqueueFetch(part_name, /* storage_init = */false); - } - - ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed); - - if (missing_part_search_result == MissingPartSearchResult::LostForever) - { - auto lost_part_info = MergeTreePartInfo::fromPartName(part_name, storage.format_version); - if (lost_part_info.level != 0 || lost_part_info.mutation != 0) - { - Strings source_parts; - bool part_in_queue = storage.queue.checkPartInQueueAndGetSourceParts(part_name, source_parts); - - /// If it's MERGE/MUTATION etc. we shouldn't replace result part with empty part - /// because some source parts can be lost, but some of them can exist. - if (part_in_queue && !source_parts.empty()) - { - LOG_ERROR(log, "Part {} found in queue and some source parts for it was lost. Will check all source parts.", part_name); - for (const String & source_part_name : source_parts) - enqueuePart(source_part_name); - - return; - } - } - - ThreadFuzzer::maybeInjectSleep(); - - if (storage.createEmptyPartInsteadOfLost(zookeeper, part_name)) - { - /** This situation is possible if on all the replicas where the part was, it deteriorated. - * For example, a replica that has just written it has power turned off and the data has not been written from cache to disk. - */ - LOG_ERROR(log, "Part {} is lost forever.", part_name); - ProfileEvents::increment(ProfileEvents::ReplicatedDataLoss); - } - else - { - LOG_WARNING(log, "Cannot create empty part {} instead of lost. Will retry later", part_name); - } - } -} std::pair ReplicatedMergeTreePartCheckThread::findLocalPart(const String & part_name) { @@ -335,12 +273,12 @@ std::pair ReplicatedMergeTreePartCheckThread::findLo return std::make_pair(exists_in_zookeeper, part); } -CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_name) +ReplicatedCheckResult ReplicatedMergeTreePartCheckThread::checkPartImpl(const String & part_name) { - LOG_INFO(log, "Checking part {}", part_name); - ProfileEvents::increment(ProfileEvents::ReplicatedPartChecks); - + ReplicatedCheckResult result; auto [exists_in_zookeeper, part] = findLocalPart(part_name); + result.exists_in_zookeeper = exists_in_zookeeper; + result.part = part; LOG_TRACE(log, "Part {} in zookeeper: {}, locally: {}", part_name, exists_in_zookeeper, part != nullptr); @@ -351,130 +289,236 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na { /// We cannot rely on exists_in_zookeeper, because the cleanup thread is probably going to remove it from ZooKeeper /// Also, it will avoid "Cannot commit empty part: Part ... (state Outdated) already exists, but it will be deleted soon" - LOG_WARNING(log, "Part {} is Outdated, will wait for cleanup thread to handle it and check again later", part_name); time_t lifetime = time(nullptr) - outdated->remove_time; time_t max_lifetime = storage.getSettings()->old_parts_lifetime.totalSeconds(); time_t delay = lifetime >= max_lifetime ? 0 : max_lifetime - lifetime; - enqueuePart(part_name, delay + 30); - return {part_name, true, "Part is Outdated, will recheck later"}; + result.recheck_after = delay + 30; + + auto message = PreformattedMessage::create("Part {} is Outdated, will wait for cleanup thread to handle it " + "and check again after {}s", part_name, result.recheck_after); + LOG_WARNING(log, message); + result.status = {part_name, true, message.text}; + result.action = ReplicatedCheckResult::RecheckLater; + return result; } } /// We do not have this or a covering part. 
if (!part) { - searchForMissingPartAndFetchIfPossible(part_name, exists_in_zookeeper); - return {part_name, false, "Part is missing, will search for it"}; + result.status = {part_name, false, "Part is missing, will search for it"}; + result.action = ReplicatedCheckResult::TryFetchMissing; + return result; } /// We have this part, and it's active. We will check whether we need this part and whether it has the right data. - if (part->name == part_name) + if (part->name != part_name) { - auto zookeeper = storage.getZooKeeper(); - auto table_lock = storage.lockForShare(RWLockImpl::NO_QUERY, storage.getSettings()->lock_acquire_timeout_for_background_operations); + /// If we have a covering part, ignore all the problems with this part. + /// In the worst case, errors will still appear `old_parts_lifetime` seconds in error log until the part is removed as the old one. + auto message = PreformattedMessage::create("We have part {} covering part {}, will not check", part->name, part_name); + LOG_WARNING(log, message); + result.status = {part_name, true, message.text}; + result.action = ReplicatedCheckResult::DoNothing; + return result; + } + + time_t current_time = time(nullptr); + auto zookeeper = storage.getZooKeeper(); + auto table_lock = storage.lockForShare(RWLockImpl::NO_QUERY, storage.getSettings()->lock_acquire_timeout_for_background_operations); - auto local_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( - part->getColumns(), part->checksums); + auto local_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( + part->getColumns(), part->checksums); + + + /// If the part is in ZooKeeper, check its data with its checksums, and them with ZooKeeper. + if (exists_in_zookeeper) + { + LOG_INFO(log, "Checking data of part {}.", part_name); /// The double get scheme is needed to retain compatibility with very old parts that were created /// before the ReplicatedMergeTreePartHeader was introduced. - String part_path = storage.replica_path + "/parts/" + part_name; - String part_znode; - /// If the part is in ZooKeeper, check its data with its checksums, and them with ZooKeeper. 
- if (zookeeper->tryGet(part_path, part_znode)) - { - LOG_INFO(log, "Checking data of part {}.", part_name); + String part_znode = zookeeper->get(part_path); - try + try + { + ReplicatedMergeTreePartHeader zk_part_header; + if (!part_znode.empty()) + zk_part_header = ReplicatedMergeTreePartHeader::fromString(part_znode); + else { - ReplicatedMergeTreePartHeader zk_part_header; - if (!part_znode.empty()) - zk_part_header = ReplicatedMergeTreePartHeader::fromString(part_znode); - else - { - String columns_znode = zookeeper->get(part_path + "/columns"); - String checksums_znode = zookeeper->get(part_path + "/checksums"); - zk_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksumsZNodes( - columns_znode, checksums_znode); - } - - if (local_part_header.getColumnsHash() != zk_part_header.getColumnsHash()) - throw Exception(ErrorCodes::TABLE_DIFFERS_TOO_MUCH, "Columns of local part {} are different from ZooKeeper", part_name); + String columns_znode = zookeeper->get(part_path + "/columns"); + String checksums_znode = zookeeper->get(part_path + "/checksums"); + zk_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksumsZNodes( + columns_znode, checksums_znode); + } - zk_part_header.getChecksums().checkEqual(local_part_header.getChecksums(), true); + if (local_part_header.getColumnsHash() != zk_part_header.getColumnsHash()) + throw Exception(ErrorCodes::TABLE_DIFFERS_TOO_MUCH, "Columns of local part {} are different from ZooKeeper", part_name); - checkDataPart( - part, - true, - [this] { return need_stop.load(); }); + zk_part_header.getChecksums().checkEqual(local_part_header.getChecksums(), true); - if (need_stop) - { - LOG_INFO(log, "Checking part was cancelled."); - return {part_name, false, "Checking part was cancelled"}; - } + checkDataPart( + part, + true, + [this] { return need_stop.load(); }); - LOG_INFO(log, "Part {} looks good.", part_name); - } - catch (const Exception & e) + if (need_stop) { - /// Don't count the part as broken if we got known retryable exception. - /// In fact, there can be other similar situations because not all - /// of the exceptions are classified as retryable/non-retryable. But it is OK, - /// because there is a safety guard against deleting too many parts. - if (isRetryableException(e)) - throw; + result.status = {part_name, false, "Checking part was cancelled"}; + result.action = ReplicatedCheckResult::Cancelled; + return result; + } - tryLogCurrentException(log, __PRETTY_FUNCTION__); - constexpr auto fmt_string = "Part {} looks broken. Removing it and will try to fetch."; - String message = fmt::format(fmt_string, part_name); - LOG_ERROR(log, fmt_string, part_name); + part->checkMetadata(); - /// Delete part locally. - storage.outdateBrokenPartAndCloneToDetached(part, "broken"); + LOG_INFO(log, "Part {} looks good.", part_name); + result.status = {part_name, true, ""}; + result.action = ReplicatedCheckResult::DoNothing; + return result; + } + catch (const Exception & e) + { + /// Don't count the part as broken if we got known retryable exception. + /// In fact, there can be other similar situations because not all + /// of the exceptions are classified as retryable/non-retryable. But it is OK, + /// because there is a safety guard against deleting too many parts. + if (isRetryableException(e)) + throw; + + tryLogCurrentException(log, __PRETTY_FUNCTION__); + + auto message = PreformattedMessage::create("Part {} looks broken. 
Removing it and will try to fetch.", part_name); + LOG_ERROR(log, message); + + /// Part is broken, let's try to find it and fetch. + result.status = {part_name, false, message}; + result.action = ReplicatedCheckResult::TryFetchMissing; + return result; + } + } + else if (part->modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < current_time) + { + /// If the part is not in ZooKeeper, delete it locally. + /// Probably, someone just wrote down the part, and has not yet added to ZK. + /// Therefore, delete only if the part is old (not very reliable). + constexpr auto fmt_string = "Unexpected part {} in filesystem. Removing."; + String message = fmt::format(fmt_string, part_name); + LOG_ERROR(log, fmt_string, part_name); + result.status = {part_name, false, message}; + result.action = ReplicatedCheckResult::DetachUnexpected; + return result; + } + else + { + auto message = PreformattedMessage::create("Young part {} with age {} seconds hasn't been added to ZooKeeper yet. It's ok.", + part_name, (current_time - part->modification_time)); + LOG_INFO(log, message); + result.recheck_after = part->modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER - current_time; + result.status = {part_name, true, message}; + result.action = ReplicatedCheckResult::RecheckLater; + return result; + } +} - ThreadFuzzer::maybeInjectMemoryLimitException(); - ThreadFuzzer::maybeInjectSleep(); - /// Part is broken, let's try to find it and fetch. - searchForMissingPartAndFetchIfPossible(part_name, exists_in_zookeeper); +CheckResult ReplicatedMergeTreePartCheckThread::checkPartAndFix(const String & part_name) +{ + LOG_INFO(log, "Checking part {}", part_name); + ProfileEvents::increment(ProfileEvents::ReplicatedPartChecks); - return {part_name, false, message}; - } - } - else if (part->modification_time + MAX_AGE_OF_LOCAL_PART_THAT_WASNT_ADDED_TO_ZOOKEEPER < time(nullptr)) + ReplicatedCheckResult result = checkPartImpl(part_name); + switch (result.action) + { + case ReplicatedCheckResult::None: UNREACHABLE(); + case ReplicatedCheckResult::DoNothing: break; + case ReplicatedCheckResult::Cancelled: + LOG_INFO(log, "Checking part was cancelled."); + break; + + case ReplicatedCheckResult::RecheckLater: + enqueuePart(part_name, result.recheck_after); + break; + + case ReplicatedCheckResult::DetachUnexpected: + chassert(!result.exists_in_zookeeper); + ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed); + + storage.outdateUnexpectedPartAndCloneToDetached(result.part); + break; + + case ReplicatedCheckResult::TryFetchMissing: { - /// If the part is not in ZooKeeper, delete it locally. - /// Probably, someone just wrote down the part, and has not yet added to ZK. - /// Therefore, delete only if the part is old (not very reliable). ProfileEvents::increment(ProfileEvents::ReplicatedPartChecksFailed); - constexpr auto fmt_string = "Unexpected part {} in filesystem. Removing."; - String message = fmt::format(fmt_string, part_name); - LOG_ERROR(log, fmt_string, part_name); - storage.outdateBrokenPartAndCloneToDetached(part, "unexpected"); - ThreadFuzzer::maybeInjectSleep(); - return {part_name, false, message}; + + /// If the part is in ZooKeeper, remove it from there and add the task to download it to the queue (atomically). + if (result.exists_in_zookeeper) + { + /// We cannot simply remove part from ZooKeeper, because it may be removed from virtual_part, + /// so we have to create some entry in the queue. 
Maybe we will execute it (by fetching part or covering part from somewhere), + /// maybe will simply replace with empty part. + if (result.part) + LOG_WARNING(log, "Part {} exists in ZooKeeper and the local part was broken. Detaching it, removing from ZooKeeper and queueing a fetch.", part_name); + else + LOG_WARNING(log, "Part {} exists in ZooKeeper but not locally. Removing from ZooKeeper and queueing a fetch.", part_name); + + storage.removePartAndEnqueueFetch(part_name, /* storage_init = */ false); + break; + } + + chassert(!result.part); + + /// Part is not in ZooKeeper and not on disk (so there's nothing to detach or remove from ZooKeeper). + /// Probably we cannot execute some entry from the replication queue (so don't need to enqueue another one). + /// Either all replicas having the part are not active, or the part is lost forever. + bool is_lost = searchForMissingPartOnOtherReplicas(part_name); + if (is_lost) + onPartIsLostForever(part_name); + + break; } - else + } + + return result.status; +} + +void ReplicatedMergeTreePartCheckThread::onPartIsLostForever(const String & part_name) +{ + auto lost_part_info = MergeTreePartInfo::fromPartName(part_name, storage.format_version); + if (lost_part_info.level != 0 || lost_part_info.mutation != 0) + { + Strings source_parts; + bool part_in_queue = storage.queue.checkPartInQueueAndGetSourceParts(part_name, source_parts); + + /// If it's MERGE/MUTATION etc. we shouldn't replace result part with empty part + /// because some source parts can be lost, but some of them can exist. + if (part_in_queue && !source_parts.empty()) { - /// TODO You need to make sure that the part is still checked after a while. - /// Otherwise, it's possible that the part was not added to ZK, - /// but remained in the filesystem and in a number of active parts. - /// And then for a long time (before restarting), the data on the replicas will be different. + LOG_ERROR(log, "Part {} found in queue and some source parts for it was lost. Will check all source parts.", part_name); + for (const String & source_part_name : source_parts) + enqueuePart(source_part_name); - LOG_TRACE(log, "Young part {} with age {} seconds hasn't been added to ZooKeeper yet. It's ok.", part_name, (time(nullptr) - part->modification_time)); + return; } } + + ThreadFuzzer::maybeInjectSleep(); + + if (storage.createEmptyPartInsteadOfLost(storage.getZooKeeper(), part_name)) + { + /** This situation is possible if on all the replicas where the part was, it deteriorated. + * For example, a replica that has just written it has power turned off and the data has not been written from cache to disk. + */ + LOG_ERROR(log, "Part {} is lost forever.", part_name); + ProfileEvents::increment(ProfileEvents::ReplicatedDataLoss); + } else { - /// If we have a covering part, ignore all the problems with this part. - /// In the worst case, errors will still appear `old_parts_lifetime` seconds in error log until the part is removed as the old one. - LOG_WARNING(log, "We have part {} covering part {}", part->name, part_name); + LOG_WARNING(log, "Cannot create empty part {} instead of lost. 
Will retry later", part_name); + constexpr time_t retry_after_seconds = 30; + enqueuePart(part_name, retry_after_seconds); } - - part->checkMetadata(); - return {part_name, true, ""}; } @@ -524,7 +568,7 @@ void ReplicatedMergeTreePartCheckThread::run() if (selected == parts_queue.end()) return; - checkPart(selected->first); + checkPartAndFix(selected->first); if (need_stop) return; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h index b86191dbf500..0a8fbc75c059 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h @@ -18,6 +18,27 @@ namespace DB class StorageReplicatedMergeTree; +struct ReplicatedCheckResult +{ + enum Action + { + None, + + Cancelled, + DoNothing, + RecheckLater, + + DetachUnexpected, + TryFetchMissing, + }; + + CheckResult status; + Action action = None; + + bool exists_in_zookeeper; + MergeTreeDataPartPtr part; + time_t recheck_after = 0; +}; /** Checks the integrity of the parts requested for validation. * @@ -44,7 +65,9 @@ class ReplicatedMergeTreePartCheckThread size_t size() const; /// Check part by name - CheckResult checkPart(const String & part_name); + CheckResult checkPartAndFix(const String & part_name); + + ReplicatedCheckResult checkPartImpl(const String & part_name); std::unique_lock pausePartsCheck(); @@ -54,26 +77,13 @@ class ReplicatedMergeTreePartCheckThread private: void run(); - /// Search for missing part and queue fetch if possible. Otherwise - /// remove part from zookeeper and queue. - void searchForMissingPartAndFetchIfPossible(const String & part_name, bool exists_in_zookeeper); + void onPartIsLostForever(const String & part_name); std::pair findLocalPart(const String & part_name); - enum MissingPartSearchResult - { - /// We found this part on other replica, let's fetch it. - FoundAndNeedFetch, - /// We found covering part or source part with same min and max block number - /// don't need to fetch because we should do it during normal queue processing. - FoundAndDontNeedFetch, - /// Covering part not found anywhere and exact part_name doesn't found on other - /// replicas. - LostForever, - }; - /// Search for missing part on other replicas or covering part on all replicas (including our replica). - MissingPartSearchResult searchForMissingPartOnOtherReplicas(const String & part_name); + /// Returns false if the part is lost forever. + bool searchForMissingPartOnOtherReplicas(const String & part_name) const; StorageReplicatedMergeTree & storage; String log_name; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index b1ba06c77f9f..56b8d431588b 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3351,6 +3351,17 @@ bool StorageReplicatedMergeTree::canExecuteFetch(const ReplicatedMergeTreeLogEnt return false; } + if (entry.source_replica.empty()) + { + auto part = getPartIfExists(entry.new_part_name, {MergeTreeDataPartState::Active, MergeTreeDataPartState::Outdated, MergeTreeDataPartState::Deleting}); + if (part && part->was_removed_as_broken) + { + disable_reason = fmt::format("Not executing fetch of part {} because we still have broken part with that name. 
" + "Waiting for the broken part to be removed first.", entry.new_part_name); + return false; + } + } + return true; } @@ -3731,23 +3742,44 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n { auto zookeeper = getZooKeeper(); + DataPartPtr broken_part; + auto outdate_broken_part = [this, &broken_part]() + { + if (broken_part) + return; + DataPartsLock lock = lockParts(); + if (broken_part->getState() == DataPartState::Active) + removePartsFromWorkingSet(NO_TRANSACTION_RAW, {broken_part}, true, &lock); + }; + /// We don't know exactly what happened to broken part /// and we are going to remove all covered log entries. /// It's quite dangerous, so clone covered parts to detached. auto broken_part_info = MergeTreePartInfo::fromPartName(part_name, format_version); - auto partition_range = getVisibleDataPartsVectorInPartition(getContext(), broken_part_info.partition_id); + auto partition_range = getDataPartsVectorInPartitionForInternalUsage({MergeTreeDataPartState::Active, MergeTreeDataPartState::Outdated}, + broken_part_info.partition_id); for (const auto & part : partition_range) { if (!broken_part_info.contains(part->info)) continue; - /// Broken part itself either already moved to detached or does not exist. - assert(broken_part_info != part->info); - part->makeCloneInDetached("covered-by-broken", getInMemoryMetadataPtr()); + if (broken_part_info == part->info) + { + chassert(!broken_part); + chassert(!storage_init); + part->was_removed_as_broken = true; + part->makeCloneInDetached("broken", getInMemoryMetadataPtr()); + broken_part = part; + } + else + { + part->makeCloneInDetached("covered-by-broken", getInMemoryMetadataPtr()); + } } ThreadFuzzer::maybeInjectSleep(); + ThreadFuzzer::maybeInjectMemoryLimitException(); /// It's possible that queue contains entries covered by part_name. /// For example, we had GET_PART all_1_42_5 and MUTATE_PART all_1_42_5_63, @@ -3762,6 +3794,7 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n queue.removePartProducingOpsInRange(zookeeper, broken_part_info, /* covering_entry= */ {}); ThreadFuzzer::maybeInjectSleep(); + ThreadFuzzer::maybeInjectMemoryLimitException(); String part_path = fs::path(replica_path) / "parts" / part_name; @@ -3780,7 +3813,7 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n /// but we are going to remove it from /parts and add to queue again. 
Coordination::Stat is_lost_stat; String is_lost_value = zookeeper->get(replica_path + "/is_lost", &is_lost_stat); - assert(is_lost_value == "0"); + chassert(is_lost_value == "0"); ops.emplace_back(zkutil::makeSetRequest(replica_path + "/is_lost", is_lost_value, is_lost_stat.version)); part_create_time = stat.ctime / 1000; @@ -3802,12 +3835,8 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n ReplicatedMergeTreeMergePredicate merge_pred = queue.getMergePredicate(zookeeper, PartitionIdsHint{broken_part_info.partition_id}); if (merge_pred.isGoingToBeDropped(broken_part_info)) { - LOG_INFO(log, "Broken part {} is covered by drop range, don't need to fetch it, removing it from ZooKeeper", part_name); - - /// But we have to remove it from ZooKeeper because broken parts are not removed from ZK during Outdated parts cleanup - /// There's a chance that DROP_RANGE will remove it, but only if it was not already removed by cleanup thread - if (exists_in_zookeeper) - removePartsFromZooKeeperWithRetries({part_name}); + LOG_INFO(log, "Broken part {} is covered by drop range, don't need to fetch it", part_name); + outdate_broken_part(); return; } @@ -3836,10 +3865,11 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n zkutil::KeeperMultiException::check(rc, ops, results); - String path_created = dynamic_cast(*results.back()).path_created; - log_entry->znode_name = path_created.substr(path_created.find_last_of('/') + 1); - queue.insert(zookeeper, log_entry); - break; + /// Make the part outdated after creating the log entry. + /// Otherwise, if we failed to create the entry, cleanup thread could remove the part from ZooKeeper (leading to diverged replicas) + outdate_broken_part(); + queue_updating_task->schedule(); + return; } } @@ -6841,10 +6871,10 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZKImpl(zkutil::ZooKee { /// Broken part can be removed from zk by removePartAndEnqueueFetch(...) only. /// Removal without enqueueing a fetch leads to intersecting parts. 
- if (part->is_duplicate || part->outdated_because_broken) + if (part->is_duplicate || part->is_unexpected_local_part) { - LOG_WARNING(log, "Will not remove part {} from ZooKeeper (is_duplicate: {}, outdated_because_broken: {})", - part->name, part->is_duplicate, part->outdated_because_broken); + LOG_WARNING(log, "Will not remove part {} from ZooKeeper (is_duplicate: {}, is_unexpected_local_part: {})", + part->name, part->is_duplicate, part->is_unexpected_local_part); parts_to_delete_only_from_filesystem.emplace_back(part); } else @@ -8189,7 +8219,7 @@ CheckResults StorageReplicatedMergeTree::checkData(const ASTPtr & query, Context { try { - results.push_back(part_check_thread.checkPart(part->name)); + results.push_back(part_check_thread.checkPartAndFix(part->name)); } catch (const Exception & ex) { From bb5b47cacf30c84f51e3c8a70040bf5707a5e742 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 4 Jul 2023 19:07:11 +0000 Subject: [PATCH 077/179] do not access Exception::thread_frame_pointers if not initialized --- src/Daemon/BaseDaemon.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 6d29523a3548..a75aac7a08ec 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -154,7 +154,10 @@ static void signalHandler(int sig, siginfo_t * info, void * context) writePODBinary(*info, out); writePODBinary(signal_context, out); writePODBinary(stack_trace, out); - writeVectorBinary(Exception::thread_frame_pointers, out); + if (Exception::enable_job_stack_trace) + writeVectorBinary(Exception::thread_frame_pointers, out); + else + writeVarUInt(0, out); writeBinary(static_cast(getThreadId()), out); writePODBinary(current_thread, out); From 6345879cdf4ba9c33f121a17a16e389761791de5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 4 Jul 2023 22:56:58 +0300 Subject: [PATCH 078/179] Update src/Disks/VolumeJBOD.cpp Co-authored-by: Sergei Trifonov --- src/Disks/VolumeJBOD.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp index 885b1d56b0db..519f3378c4ce 100644 --- a/src/Disks/VolumeJBOD.cpp +++ b/src/Disks/VolumeJBOD.cpp @@ -46,11 +46,11 @@ VolumeJBOD::VolumeJBOD( for (const auto & disk : disks) { auto size = disk->getTotalSpace(); - sizes.push_back(*size); if (size) sum_size += *size; else break; + sizes.push_back(*size); } if (sizes.size() == disks.size()) { From 5a3299572626c5ce5fcd53759b134de49287a4e3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 4 Jul 2023 22:57:39 +0300 Subject: [PATCH 079/179] Update src/Disks/IVolume.cpp Co-authored-by: Sergei Trifonov --- src/Disks/IVolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/IVolume.cpp b/src/Disks/IVolume.cpp index 15b52acb4227..43caf07d70ab 100644 --- a/src/Disks/IVolume.cpp +++ b/src/Disks/IVolume.cpp @@ -51,7 +51,7 @@ IVolume::IVolume( std::optional IVolume::getMaxUnreservedFreeSpace() const { - std::optional res = 0; + std::optional res; for (const auto & disk : disks) res = std::max(res, disk->getUnreservedSpace()); return res; From c76cf53391426471d2a374b63c302e2a383258a5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 4 Jul 2023 22:14:37 +0200 Subject: [PATCH 080/179] Address review comments --- src/Disks/IVolume.cpp | 9 ++++++++- src/Disks/StoragePolicy.cpp | 13 ++++++++----- src/Disks/loadLocalDiskConfig.cpp | 2 +- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/Disks/IVolume.cpp 
b/src/Disks/IVolume.cpp index 43caf07d70ab..0b072e6ba8b9 100644 --- a/src/Disks/IVolume.cpp +++ b/src/Disks/IVolume.cpp @@ -53,7 +53,14 @@ std::optional IVolume::getMaxUnreservedFreeSpace() const { std::optional res; for (const auto & disk : disks) - res = std::max(res, disk->getUnreservedSpace()); + { + auto disk_unreserved_space = disk->getUnreservedSpace(); + if (!disk_unreserved_space) + return std::nullopt; /// There is at least one unlimited disk. + + if (!res || *disk_unreserved_space > *res) + res = disk_unreserved_space; + } return res; } diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 92cca23ca76b..6b8d7186a159 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -209,14 +209,17 @@ DiskPtr StoragePolicy::tryGetDiskByName(const String & disk_name) const UInt64 StoragePolicy::getMaxUnreservedFreeSpace() const { - UInt64 res = 0; + std::optional res; for (const auto & volume : volumes) { - auto max_unreserved_for_volume = volume->getMaxUnreservedFreeSpace(); - if (max_unreserved_for_volume) - res = std::max(res, *max_unreserved_for_volume); + auto volume_unreserved_space = volume->getMaxUnreservedFreeSpace(); + if (!volume_unreserved_space) + return -1ULL; /// There is at least one unlimited disk. + + if (!res || *volume_unreserved_space > *res) + res = volume_unreserved_space; } - return res; + return res.value_or(-1ULL); } diff --git a/src/Disks/loadLocalDiskConfig.cpp b/src/Disks/loadLocalDiskConfig.cpp index 0e5eca17ca74..0c4a9e7af32e 100644 --- a/src/Disks/loadLocalDiskConfig.cpp +++ b/src/Disks/loadLocalDiskConfig.cpp @@ -56,7 +56,7 @@ void loadDiskLocalConfig(const String & name, tmp_path = context->getPath(); // Create tmp disk for getting total disk space. - keep_free_space_bytes = static_cast(DiskLocal("tmp", tmp_path, 0).getTotalSpace() * ratio); + keep_free_space_bytes = static_cast(*DiskLocal("tmp", tmp_path, 0).getTotalSpace() * ratio); } } From ead43836f7b9f1eb04e8cd4e9c293f39ddf1ec1a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 4 Jul 2023 22:35:01 +0200 Subject: [PATCH 081/179] Fix the test --- .../02796_calculate_text_stack_trace.sql | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql index 601bd16fb39e..52d55bdbe119 100644 --- a/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql +++ b/tests/queries/0_stateless/02796_calculate_text_stack_trace.sql @@ -1,16 +1,20 @@ -- Tags: no-parallel -TRUNCATE TABLE system.text_log; - SELECT 'Hello', throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } SYSTEM FLUSH LOGS; + SELECT length(stack_trace) > 1000 FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'Hello\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; -SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' ORDER BY event_time_microseconds DESC LIMIT 10; -TRUNCATE TABLE system.text_log; +SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' + AND query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'Hello\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1) + ORDER BY event_time_microseconds DESC LIMIT 10; SET 
calculate_text_stack_trace = 0; SELECT 'World', throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } SYSTEM FLUSH LOGS; + SELECT length(stack_trace) FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'World\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; -SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' ORDER BY event_time_microseconds DESC LIMIT 10; + +SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' + AND query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'World\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1) + ORDER BY event_time_microseconds DESC LIMIT 10; From 607a8a1c465baf85818ec41b8229f7afda8d6fb8 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 4 Jul 2023 22:52:59 +0200 Subject: [PATCH 082/179] fix --- src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index d6f8dbac883d..1cc3736bd2e8 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -472,8 +472,8 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPartAndFix(const String & p /// Part is not in ZooKeeper and not on disk (so there's nothing to detach or remove from ZooKeeper). /// Probably we cannot execute some entry from the replication queue (so don't need to enqueue another one). /// Either all replicas having the part are not active, or the part is lost forever. - bool is_lost = searchForMissingPartOnOtherReplicas(part_name); - if (is_lost) + bool found_something = searchForMissingPartOnOtherReplicas(part_name); + if (!found_something) onPartIsLostForever(part_name); break; From 7f1ee68c87160089d70f4cef04c975c38b01218e Mon Sep 17 00:00:00 2001 From: Han Fei Date: Tue, 4 Jul 2023 23:08:54 +0200 Subject: [PATCH 083/179] refine --- src/Functions/DateTimeTransforms.h | 8 +++---- src/Functions/IFunction.h | 4 ++++ ...OrDateTimeConverterWithPreimageVisitor.cpp | 21 +++++++++---------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 84c71c89b111..e59a90462770 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -734,11 +734,11 @@ struct ToYearImpl const DateLUTImpl & date_lut = DateLUT::instance(); - auto start_time = date_lut.makeDateTime(year, 1, 1, 0, 0, 0); + auto start_time = date_lut.makeDayNum(year, 1, 1); auto end_time = date_lut.addYears(start_time, 1); if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) - return {std::make_pair(Field(start_time), Field(end_time))}; + return {std::make_pair(Field(Int32(start_time)), Field(Int32(end_time)))}; else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. 
Should be Date, Date32, DateTime or DateTime64", @@ -1412,11 +1412,11 @@ struct ToYYYYMMImpl const DateLUTImpl & date_lut = DateLUT::instance(); - auto start_time = date_lut.makeDateTime(year, month, 1, 0, 0, 0); + auto start_time = date_lut.makeDayNum(year, month, 1); auto end_time = date_lut.addMonths(start_time, 1); if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) - return {std::make_pair(Field(start_time), Field(end_time))}; + return {std::make_pair(Field(Int32(start_time)), Field(Int32(end_time)))}; else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. Should be Date, Date32, DateTime or DateTime64", diff --git a/src/Functions/IFunction.h b/src/Functions/IFunction.h index 433cb61d04e9..928475652f4e 100644 --- a/src/Functions/IFunction.h +++ b/src/Functions/IFunction.h @@ -11,6 +11,10 @@ #include "config.h" +#if USE_EMBEDDED_COMPILER +# include +#endif + #include /// This file contains user interface for functions. diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp index a377bb4bba68..9c2fdf6dee9a 100644 --- a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp +++ b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp @@ -4,6 +4,7 @@ #include #include #include +#include "base/DayNum.h" #include #include #include @@ -37,20 +38,18 @@ ASTPtr generateOptimizedDateFilterAST(const String & comparator, const NameAndTy const DateLUTImpl & date_lut = DateLUT::instance(); const String & column_name = column.name; - String start_date_or_date_time; - String end_date_or_date_time; - if (isDateOrDate32(column.type.get())) - { - start_date_or_date_time = date_lut.dateToString(range.first.get()); - end_date_or_date_time = date_lut.dateToString(range.second.get()); - } - else if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) + auto start_date = range.first.get(); + auto end_date = range.second.get(); + String start_date_or_date_time = date_lut.dateToString(ExtendedDayNum(static_cast(start_date))); + String end_date_or_date_time = date_lut.dateToString(ExtendedDayNum(static_cast(end_date))); + + if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) { - start_date_or_date_time = date_lut.timeToString(range.first.get()); - end_date_or_date_time = date_lut.timeToString(range.second.get()); + start_date_or_date_time += " 00:00:00"; + end_date_or_date_time += " 00:00:00"; } - else [[unlikely]] return {}; + else if (!isDateOrDate32(column.type.get())) return {}; if (comparator == "equals") { From 9544c035b9d8b4646defd770b829715043b145d7 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 5 Jul 2023 11:15:31 +0200 Subject: [PATCH 084/179] Revert "refine" This reverts commit 7f1ee68c87160089d70f4cef04c975c38b01218e. 
--- src/Functions/DateTimeTransforms.h | 8 +++---- src/Functions/IFunction.h | 4 ---- ...OrDateTimeConverterWithPreimageVisitor.cpp | 21 ++++++++++--------- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index e59a90462770..84c71c89b111 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -734,11 +734,11 @@ struct ToYearImpl const DateLUTImpl & date_lut = DateLUT::instance(); - auto start_time = date_lut.makeDayNum(year, 1, 1); + auto start_time = date_lut.makeDateTime(year, 1, 1, 0, 0, 0); auto end_time = date_lut.addYears(start_time, 1); if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) - return {std::make_pair(Field(Int32(start_time)), Field(Int32(end_time)))}; + return {std::make_pair(Field(start_time), Field(end_time))}; else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. Should be Date, Date32, DateTime or DateTime64", @@ -1412,11 +1412,11 @@ struct ToYYYYMMImpl const DateLUTImpl & date_lut = DateLUT::instance(); - auto start_time = date_lut.makeDayNum(year, month, 1); + auto start_time = date_lut.makeDateTime(year, month, 1, 0, 0, 0); auto end_time = date_lut.addMonths(start_time, 1); if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) - return {std::make_pair(Field(Int32(start_time)), Field(Int32(end_time)))}; + return {std::make_pair(Field(start_time), Field(end_time))}; else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. Should be Date, Date32, DateTime or DateTime64", diff --git a/src/Functions/IFunction.h b/src/Functions/IFunction.h index 928475652f4e..433cb61d04e9 100644 --- a/src/Functions/IFunction.h +++ b/src/Functions/IFunction.h @@ -11,10 +11,6 @@ #include "config.h" -#if USE_EMBEDDED_COMPILER -# include -#endif - #include /// This file contains user interface for functions. 
diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp index 9c2fdf6dee9a..a377bb4bba68 100644 --- a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp +++ b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp @@ -4,7 +4,6 @@ #include #include #include -#include "base/DayNum.h" #include #include #include @@ -38,18 +37,20 @@ ASTPtr generateOptimizedDateFilterAST(const String & comparator, const NameAndTy const DateLUTImpl & date_lut = DateLUT::instance(); const String & column_name = column.name; + String start_date_or_date_time; + String end_date_or_date_time; - auto start_date = range.first.get(); - auto end_date = range.second.get(); - String start_date_or_date_time = date_lut.dateToString(ExtendedDayNum(static_cast(start_date))); - String end_date_or_date_time = date_lut.dateToString(ExtendedDayNum(static_cast(end_date))); - - if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) + if (isDateOrDate32(column.type.get())) + { + start_date_or_date_time = date_lut.dateToString(range.first.get()); + end_date_or_date_time = date_lut.dateToString(range.second.get()); + } + else if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) { - start_date_or_date_time += " 00:00:00"; - end_date_or_date_time += " 00:00:00"; + start_date_or_date_time = date_lut.timeToString(range.first.get()); + end_date_or_date_time = date_lut.timeToString(range.second.get()); } - else if (!isDateOrDate32(column.type.get())) return {}; + else [[unlikely]] return {}; if (comparator == "equals") { From 2e5643cc4133f207b46534a4cf8a7875d7c18a8e Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 5 Jul 2023 11:57:18 +0200 Subject: [PATCH 085/179] use UTC LUT --- src/Functions/DateTimeTransforms.h | 4 ++-- src/Functions/IFunction.h | 4 ++++ .../OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 84c71c89b111..510a88db2b68 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -732,7 +732,7 @@ struct ToYearImpl auto year = point.get(); if (year < DATE_LUT_MIN_YEAR || year >= DATE_LUT_MAX_YEAR) return std::nullopt; - const DateLUTImpl & date_lut = DateLUT::instance(); + const DateLUTImpl & date_lut = DateLUT::instance("UTC"); auto start_time = date_lut.makeDateTime(year, 1, 1, 0, 0, 0); auto end_time = date_lut.addYears(start_time, 1); @@ -1410,7 +1410,7 @@ struct ToYYYYMMImpl if (year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || (year == DATE_LUT_MAX_YEAR && month == 12)) return std::nullopt; - const DateLUTImpl & date_lut = DateLUT::instance(); + const DateLUTImpl & date_lut = DateLUT::instance("UTC"); auto start_time = date_lut.makeDateTime(year, month, 1, 0, 0, 0); auto end_time = date_lut.addMonths(start_time, 1); diff --git a/src/Functions/IFunction.h b/src/Functions/IFunction.h index 433cb61d04e9..09758d59e4a7 100644 --- a/src/Functions/IFunction.h +++ b/src/Functions/IFunction.h @@ -13,6 +13,10 @@ #include +#if USE_EMBEDDED_COMPILER +# include +#endif + /// This file contains user interface for functions. 
namespace llvm diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp index a377bb4bba68..6a9251cec49e 100644 --- a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp +++ b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp @@ -34,7 +34,7 @@ namespace ErrorCodes */ ASTPtr generateOptimizedDateFilterAST(const String & comparator, const NameAndTypePair & column, const std::pair& range) { - const DateLUTImpl & date_lut = DateLUT::instance(); + const DateLUTImpl & date_lut = DateLUT::instance("UTC"); const String & column_name = column.name; String start_date_or_date_time; From 86014a60a308ec41c7416bdbbfe6b360dcf1617b Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Wed, 5 Jul 2023 11:42:02 +0000 Subject: [PATCH 086/179] Fixed case with spaces before delimiter --- src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 1 + tests/queries/0_stateless/00301_csv.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index edbc33fb3c30..9731b4ba4650 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -341,6 +341,7 @@ bool CSVFormatReader::readField( if (is_last_file_column && format_settings.csv.ignore_extra_columns) { // Skip all fields to next line. + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); while (checkChar(format_settings.csv.delimiter, *buf)) { skipField(); diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index dc354433af90..7657745e9f75 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -49,7 +49,7 @@ echo '"Hello", 1, "String1" "Hello", 3, "String3", "2016-01-13" "Hello", 4, , "2016-01-14" "Hello", 5, "String5", "2016-01-15", "2016-01-16" -"Hello", 6, "String6", "line with a +"Hello", 6, "String6" , "line with a break"' | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_ignore_extra_columns=1 --query="INSERT INTO csv FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s, n"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; From e957600d5c287c52f93d0f631587852ad0869035 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Wed, 5 Jul 2023 12:28:27 +0000 Subject: [PATCH 087/179] wip --- src/Parsers/ASTColumnDeclaration.cpp | 5 ++ src/Parsers/ASTColumnDeclaration.h | 1 + src/Parsers/ASTCreateQuery.h | 3 +- src/Parsers/ParserCreateQuery.cpp | 26 +++++- src/Parsers/ParserCreateQuery.h | 11 ++- .../02811_primary_key_in_columns.sql | 83 +++++++++++++++++++ 6 files changed, 126 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/02811_primary_key_in_columns.sql diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index c2396708a73c..12d000d5e9f6 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -44,6 +44,7 @@ ASTPtr ASTColumnDeclaration::clone() const res->ttl = ttl->clone(); res->children.push_back(res->ttl); } + if (collation) { res->collation = collation->clone(); @@ -76,6 +77,10 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & settings, FormatSta << (*null_modifier ? 
"" : "NOT ") << "NULL" << (settings.hilite ? hilite_none : ""); } + if (primary_key_specifier) + settings.ostr << ' ' << (settings.hilite ? hilite_keyword : "") + << "PRIMARY KEY" << (settings.hilite ? hilite_none : ""); + if (default_expression) { settings.ostr << ' ' << (settings.hilite ? hilite_keyword : "") << default_specifier << (settings.hilite ? hilite_none : ""); diff --git a/src/Parsers/ASTColumnDeclaration.h b/src/Parsers/ASTColumnDeclaration.h index 45814551db87..9d4866679110 100644 --- a/src/Parsers/ASTColumnDeclaration.h +++ b/src/Parsers/ASTColumnDeclaration.h @@ -21,6 +21,7 @@ class ASTColumnDeclaration : public IAST ASTPtr codec; ASTPtr ttl; ASTPtr collation; + bool primary_key_specifier = false; String getID(char delim) const override { return "ColumnDeclaration" + (delim + name); } diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index 230996f610e2..ae45a244a03c 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -56,6 +56,7 @@ class ASTColumns : public IAST ASTExpressionList * constraints = nullptr; ASTExpressionList * projections = nullptr; IAST * primary_key = nullptr; + IAST * primary_key_from_columns = nullptr; String getID(char) const override { return "Columns definition"; } @@ -76,7 +77,7 @@ class ASTColumns : public IAST f(reinterpret_cast(&primary_key)); f(reinterpret_cast(&constraints)); f(reinterpret_cast(&projections)); - f(reinterpret_cast(&primary_key)); + f(reinterpret_cast(&primary_key_from_columns)); } }; diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index adf3513ba409..1941bafab0de 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -300,11 +300,21 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E ASTPtr constraints = std::make_shared(); ASTPtr projections = std::make_shared(); ASTPtr primary_key; + ASTPtr primary_key_from_columns; for (const auto & elem : list->children) { - if (elem->as()) + if (auto *cd = elem->as()) + { + if(cd->primary_key_specifier) + { + if(!primary_key_from_columns) + primary_key_from_columns = makeASTFunction("tuple"); + auto column_identifier = std::make_shared(cd->name); + primary_key_from_columns->children.push_back(column_identifier); + } columns->children.push_back(elem); + } else if (elem->as()) indices->children.push_back(elem); else if (elem->as()) @@ -336,6 +346,8 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E res->set(res->projections, projections); if (primary_key) res->set(res->primary_key, primary_key); + if (primary_key_from_columns) + res->set(res->primary_key_from_columns, primary_key_from_columns); node = res; @@ -599,6 +611,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe /// List of columns. 
if (s_lparen.ignore(pos, expected)) { + /// Columns and all table properties (indices, constraints, projections, primary_key) if (!table_properties_p.parse(pos, columns_list, expected)) return false; @@ -699,6 +712,17 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe query->storage->primary_key = query->columns_list->primary_key; } + if (query->columns_list && (query->columns_list->primary_key_from_columns)) + { + /// If engine is not set will use default one + if (!query->storage) + query->set(query->storage, std::make_shared()); + else if (query->storage->primary_key) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple primary keys are not allowed."); + + query->storage->primary_key = query->columns_list->primary_key_from_columns; + } + tryGetIdentifierNameInto(as_database, query->as_database); tryGetIdentifierNameInto(as_table, query->as_table); query->set(query->select, select); diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index 5f79a4b68f61..09935e2b6087 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -135,6 +135,7 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E ParserKeyword s_remove{"REMOVE"}; ParserKeyword s_type{"TYPE"}; ParserKeyword s_collate{"COLLATE"}; + ParserKeyword s_primary_key{"PRIMARY KEY"}; ParserExpression expr_parser; ParserStringLiteral string_literal_parser; ParserLiteral literal_parser; @@ -177,6 +178,7 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E ASTPtr codec_expression; ASTPtr ttl_expression; ASTPtr collation_expression; + bool primary_key_specifier = false; auto null_check_without_moving = [&]() -> bool { @@ -198,6 +200,7 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E && !s_ephemeral.checkWithoutMoving(pos, expected) && !s_alias.checkWithoutMoving(pos, expected) && !s_auto_increment.checkWithoutMoving(pos, expected) + && !s_primary_key.checkWithoutMoving(pos, expected) && (require_type || (!s_comment.checkWithoutMoving(pos, expected) && !s_codec.checkWithoutMoving(pos, expected)))) @@ -266,7 +269,6 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E ParserDataType().parse(tmp_pos, type, tmp_expected); } } - /// This will rule out unusual expressions like *, t.* that cannot appear in DEFAULT if (default_expression && !dynamic_cast(default_expression.get())) return false; @@ -305,6 +307,11 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E return false; } + if (s_primary_key.ignore(pos, expected)) + { + primary_key_specifier = true; + } + node = column_declaration; if (type) @@ -346,6 +353,8 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E column_declaration->children.push_back(std::move(collation_expression)); } + column_declaration->primary_key_specifier = primary_key_specifier; + return true; } diff --git a/tests/queries/0_stateless/02811_primary_key_in_columns.sql b/tests/queries/0_stateless/02811_primary_key_in_columns.sql new file mode 100644 index 000000000000..df25fdd14abc --- /dev/null +++ b/tests/queries/0_stateless/02811_primary_key_in_columns.sql @@ -0,0 +1,83 @@ +DROP TABLE IF EXISTS pk_test1; +DROP TABLE IF EXISTS pk_test2; +DROP TABLE IF EXISTS pk_test3; +DROP TABLE IF EXISTS pk_test4; +DROP TABLE IF EXISTS pk_test5; +DROP TABLE IF EXISTS pk_test6; +DROP TABLE IF EXISTS pk_test7; +DROP TABLE IF EXISTS pk_test8; +DROP TABLE IF EXISTS pk_test9; +DROP TABLE IF EXISTS pk_test10; +DROP TABLE IF EXISTS 
pk_test11; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test13; +DROP TABLE IF EXISTS pk_test14; +DROP TABLE IF EXISTS pk_test15; +DROP TABLE IF EXISTS pk_test16; +DROP TABLE IF EXISTS pk_test17; +DROP TABLE IF EXISTS pk_test18; +DROP TABLE IF EXISTS pk_test19; +DROP TABLE IF EXISTS pk_test20; +DROP TABLE IF EXISTS pk_test21; +DROP TABLE IF EXISTS pk_test22; +DROP TABLE IF EXISTS pk_test23; + +SET default_table_engine=MergeTree; + +CREATE TABLE pk_test1 (String a PRIMARY KEY, String b, String c); +CREATE TABLE pk_test2 (String a PRIMARY KEY, String b PRIMARY KEY, String c); +CREATE TABLE pk_test3 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY); + +CREATE TABLE pk_test4 (String a, String b PRIMARY KEY, String c PRIMARY KEY); +CREATE TABLE pk_test5 (String a, String b PRIMARY KEY, String c); +CREATE TABLE pk_test6 (String a, String b, String c PRIMARY KEY); + +CREATE TABLE pk_test7 (String a PRIMARY KEY, String b, String c, PRIMARY KEY (a)); +CREATE TABLE pk_test8 (String a PRIMARY KEY, String b PRIMARY KEY, String c, PRIMARY KEY (a)); +CREATE TABLE pk_test9 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY, PRIMARY KEY (a)); + +CREATE TABLE pk_test10 (String a, String b PRIMARY KEY, String c PRIMARY KEY, PRIMARY KEY (a)); +CREATE TABLE pk_test11 (String a, String b PRIMARY KEY, String c, PRIMARY KEY (a)); +CREATE TABLE pk_test12 (String a, String b, String c PRIMARY KEY, PRIMARY KEY (a)); + +CREATE TABLE pk_test12 (String a PRIMARY KEY, String b, String c) PRIMARY KEY (a,b,c); +CREATE TABLE pk_test13 (String a PRIMARY KEY, String b PRIMARY KEY, String c) PRIMARY KEY (a,b,c); +CREATE TABLE pk_test14 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY) PRIMARY KEY (a,b,c); + +CREATE TABLE pk_test15 (String a, String b PRIMARY KEY, String c PRIMARY KEY) PRIMARY KEY (a,b,c); +CREATE TABLE pk_test16 (String a, String b PRIMARY KEY, String c) PRIMARY KEY (a,b,c); +CREATE TABLE pk_test17 (String a, String b, String c PRIMARY KEY) PRIMARY KEY (a,b,c); + +CREATE TABLE pk_test18 (String a PRIMARY KEY, String b, String c) ORDER BY (a,b,c); +CREATE TABLE pk_test19 (String a PRIMARY KEY, String b PRIMARY KEY, String c) ORDER BY (a,b,c); +CREATE TABLE pk_test20 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY) ORDER BY (a,b,c); + +CREATE TABLE pk_test21 (String a, String b PRIMARY KEY, String c PRIMARY KEY) ORDER BY (a,b,c); +CREATE TABLE pk_test22 (String a, String b PRIMARY KEY, String c) ORDER BY (a,b,c); +CREATE TABLE pk_test23 (String a, String b, String c PRIMARY KEY) ORDER BY (a,b,c); + +DROP TABLE IF EXISTS pk_test1; +DROP TABLE IF EXISTS pk_test2; +DROP TABLE IF EXISTS pk_test3; +DROP TABLE IF EXISTS pk_test4; +DROP TABLE IF EXISTS pk_test5; +DROP TABLE IF EXISTS pk_test6; +DROP TABLE IF EXISTS pk_test7; +DROP TABLE IF EXISTS pk_test8; +DROP TABLE IF EXISTS pk_test9; +DROP TABLE IF EXISTS pk_test10; +DROP TABLE IF EXISTS pk_test11; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test13; +DROP TABLE IF EXISTS pk_test14; +DROP TABLE IF EXISTS pk_test15; +DROP TABLE IF EXISTS pk_test16; +DROP TABLE IF EXISTS pk_test17; +DROP TABLE IF EXISTS pk_test18; +DROP TABLE IF EXISTS pk_test19; +DROP TABLE IF EXISTS pk_test20; +DROP TABLE IF EXISTS pk_test21; +DROP TABLE IF EXISTS pk_test22; +DROP TABLE IF EXISTS pk_test23; \ No newline at end of file From 5512c307db1d43b5902e00ec13fd007e0882a82c Mon Sep 17 00:00:00 2001 From: Alexander Gololobov 
<440544+davenger@users.noreply.github.com> Date: Fri, 30 Jun 2023 13:23:23 +0200 Subject: [PATCH 088/179] system.jemalloc_bins table --- src/CMakeLists.txt | 5 +- src/Storages/System/StorageSystemJemalloc.cpp | 125 ++++++++++++++++++ src/Storages/System/StorageSystemJemalloc.h | 34 +++++ src/Storages/System/attachSystemTables.cpp | 3 + 4 files changed, 164 insertions(+), 3 deletions(-) create mode 100644 src/Storages/System/StorageSystemJemalloc.cpp create mode 100644 src/Storages/System/StorageSystemJemalloc.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ca428fbff3af..f870993f0805 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -206,11 +206,10 @@ add_library (clickhouse_new_delete STATIC Common/new_delete.cpp) target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io) if (TARGET ch_contrib::jemalloc) target_link_libraries (clickhouse_new_delete PRIVATE ch_contrib::jemalloc) -endif() - -if (TARGET ch_contrib::jemalloc) target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::jemalloc) + target_link_libraries (clickhouse_storages_system PRIVATE ch_contrib::jemalloc) endif() + target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::sparsehash) add_subdirectory(Access/Common) diff --git a/src/Storages/System/StorageSystemJemalloc.cpp b/src/Storages/System/StorageSystemJemalloc.cpp new file mode 100644 index 000000000000..2cb666eb5c38 --- /dev/null +++ b/src/Storages/System/StorageSystemJemalloc.cpp @@ -0,0 +1,125 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#if USE_JEMALLOC +# include +#endif + + +namespace DB +{ + +#if USE_JEMALLOC + +UInt64 getJeMallocValue(const char * name) +{ + UInt64 value{}; + size_t size = sizeof(value); + mallctl(name, &value, &size, nullptr, 0); + return value; +} + +void fillJemallocBins(MutableColumns & res_columns) +{ + /// Bins for small allocations + auto small_bins_count = getJeMallocValue("arenas.nbins"); + UInt16 bin_index = 0; + for (UInt64 bin = 0; bin < small_bins_count; ++bin, ++bin_index) + { + auto size = getJeMallocValue(fmt::format("arenas.bin.{}.size", bin).c_str()); + auto ndalloc = getJeMallocValue(fmt::format("stats.arenas.{}.bins.{}.ndalloc", MALLCTL_ARENAS_ALL, bin).c_str()); + auto nmalloc = getJeMallocValue(fmt::format("stats.arenas.{}.bins.{}.nmalloc", MALLCTL_ARENAS_ALL, bin).c_str()); + + size_t col_num = 0; + res_columns.at(col_num++)->insert(bin_index); + res_columns.at(col_num++)->insert(0); + res_columns.at(col_num++)->insert(size); + res_columns.at(col_num++)->insert(nmalloc); + res_columns.at(col_num++)->insert(ndalloc); + } + + /// Bins for large allocations + auto large_bins_count = getJeMallocValue("arenas.nlextents"); + for (UInt64 bin = 0; bin < large_bins_count; ++bin, ++bin_index) + { + auto size = getJeMallocValue(fmt::format("arenas.lextent.{}.size", bin).c_str()); + auto ndalloc = getJeMallocValue(fmt::format("stats.arenas.{}.lextents.{}.ndalloc", MALLCTL_ARENAS_ALL, bin).c_str()); + auto nmalloc = getJeMallocValue(fmt::format("stats.arenas.{}.lextents.{}.nmalloc", MALLCTL_ARENAS_ALL, bin).c_str()); + + size_t col_num = 0; + res_columns.at(col_num++)->insert(bin_index); + res_columns.at(col_num++)->insert(1); + res_columns.at(col_num++)->insert(size); + res_columns.at(col_num++)->insert(nmalloc); + res_columns.at(col_num++)->insert(ndalloc); + } +} + +#else + +void fillJemallocBins(MutableColumns &) +{ + LOG_INFO(&Poco::Logger::get("StorageSystemJemallocBins"), "jemalloc is not enabled"); +} + 
+#endif // USE_JEMALLOC + + +StorageSystemJemallocBins::StorageSystemJemallocBins(const StorageID & table_id_) + : IStorage(table_id_) +{ + StorageInMemoryMetadata storage_metadata; + ColumnsDescription desc; + auto columns = getNamesAndTypes(); + for (const auto & col : columns) + { + ColumnDescription col_desc(col.name, col.type); + desc.add(col_desc); + } + storage_metadata.setColumns(desc); + setInMemoryMetadata(storage_metadata); +} + +NamesAndTypesList StorageSystemJemallocBins::getNamesAndTypes() +{ + return { + { "index", std::make_shared() }, + { "large", std::make_shared() }, + { "size", std::make_shared() }, + { "nmalloc", std::make_shared() }, + { "ndalloc", std::make_shared() }, + }; +} + +Pipe StorageSystemJemallocBins::read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo &, + ContextPtr /*context*/, + QueryProcessingStage::Enum /*processed_stage*/, + const size_t /*max_block_size*/, + const size_t /*num_streams*/) +{ + storage_snapshot->check(column_names); + + auto header = storage_snapshot->metadata->getSampleBlockWithVirtuals(getVirtuals()); + MutableColumns res_columns = header.cloneEmptyColumns(); + + fillJemallocBins(res_columns); + + UInt64 num_rows = res_columns.at(0)->size(); + Chunk chunk(std::move(res_columns), num_rows); + + return Pipe(std::make_shared(std::move(header), std::move(chunk))); +} + +} diff --git a/src/Storages/System/StorageSystemJemalloc.h b/src/Storages/System/StorageSystemJemalloc.h new file mode 100644 index 000000000000..a4ac2fbcdcbf --- /dev/null +++ b/src/Storages/System/StorageSystemJemalloc.h @@ -0,0 +1,34 @@ +#pragma once + +#include + + +namespace DB +{ + +class Context; + +class StorageSystemJemallocBins final : public IStorage +{ +public: + explicit StorageSystemJemallocBins(const StorageID & table_id_); + + std::string getName() const override { return "SystemJemallocBins"; } + + static NamesAndTypesList getNamesAndTypes(); + + Pipe read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + size_t num_streams) override; + + bool isSystemStorage() const override { return true; } + + bool supportsTransactions() const override { return true; } +}; + +} diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 7d21d9e39d24..a9873c821ce0 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -1,3 +1,4 @@ +#include "Storages/System/StorageSystemJemalloc.h" #include "config.h" #include @@ -82,6 +83,7 @@ #include #include #include +#include #ifdef OS_LINUX #include @@ -187,6 +189,7 @@ void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, b attach(context, system_database, "certificates"); attach(context, system_database, "named_collections"); attach(context, system_database, "user_processes"); + attach(context, system_database, "jemalloc_bins"); if (has_zookeeper) { From ff0197543e568125c7f3c75f4930d750d741ff6d Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Mon, 3 Jul 2023 22:39:42 +0200 Subject: [PATCH 089/179] Basic test that stats are non-zero --- .../02810_system_jemalloc_bins.reference | 1 + .../0_stateless/02810_system_jemalloc_bins.sql | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 
tests/queries/0_stateless/02810_system_jemalloc_bins.reference create mode 100644 tests/queries/0_stateless/02810_system_jemalloc_bins.sql diff --git a/tests/queries/0_stateless/02810_system_jemalloc_bins.reference b/tests/queries/0_stateless/02810_system_jemalloc_bins.reference new file mode 100644 index 000000000000..50d4d226b46e --- /dev/null +++ b/tests/queries/0_stateless/02810_system_jemalloc_bins.reference @@ -0,0 +1 @@ +1 1 1 1 1 diff --git a/tests/queries/0_stateless/02810_system_jemalloc_bins.sql b/tests/queries/0_stateless/02810_system_jemalloc_bins.sql new file mode 100644 index 000000000000..8ecf47e51b5a --- /dev/null +++ b/tests/queries/0_stateless/02810_system_jemalloc_bins.sql @@ -0,0 +1,13 @@ +WITH + (SELECT value IN ('ON', '1') FROM system.build_options WHERE name = 'USE_JEMALLOC') AS jemalloc_enabled, + (SELECT count() FROM system.jemalloc_bins) AS total_bins, + (SELECT count() FROM system.jemalloc_bins WHERE large) AS large_bins, + (SELECT count() FROM system.jemalloc_bins WHERE NOT large) AS small_bins, + (SELECT sum(size * (nmalloc - ndalloc)) FROM system.jemalloc_bins WHERE large) AS large_allocated_bytes, + (SELECT sum(size * (nmalloc - ndalloc)) FROM system.jemalloc_bins WHERE NOT large) AS small_allocated_bytes +SELECT + (total_bins > 0) = jemalloc_enabled, + (large_bins > 0) = jemalloc_enabled, + (small_bins > 0) = jemalloc_enabled, + (large_allocated_bytes > 0) = jemalloc_enabled, + (small_allocated_bytes > 0) = jemalloc_enabled; From bb422b816894769860a60579aea04454f8f1c496 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Mon, 3 Jul 2023 23:23:06 +0200 Subject: [PATCH 090/179] Added doc --- .../operations/system-tables/jemalloc_bins.md | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 docs/en/operations/system-tables/jemalloc_bins.md diff --git a/docs/en/operations/system-tables/jemalloc_bins.md b/docs/en/operations/system-tables/jemalloc_bins.md new file mode 100644 index 000000000000..dfe2ddb01e2e --- /dev/null +++ b/docs/en/operations/system-tables/jemalloc_bins.md @@ -0,0 +1,45 @@ +--- +slug: /en/operations/system-tables/jemalloc_bins +--- +# jemalloc_bins + +Contains information about memory allocations done via jemalloc allocator in different size classes (bins) aggregated from all arenas. +These statistics might not be absolutely accurate because of thread local caching in jemalloc. + +Columns: + +- `index` (UInt64) — Index of the bin ordered by size +- `large` (Bool) — True for large allocations and False for small +- `size` (UInt64) — Size of allocations in this bin +- `nmalloc` (UInt64) — Number of allocations +- `ndalloc` (UInt64) — Number of deallocations + +**Example** + +Find the sizes of allocations that contributed the most to the current overall memory usage. 
+ +``` sql +SELECT + *, + nmalloc - ndalloc AS active_allocations, + size * active_allocations AS allocated_bytes +FROM system.jemalloc_bins +WHERE allocated_bytes > 0 +ORDER BY allocated_bytes DESC +LIMIT 10 +``` + +``` text +┌─index─┬─large─┬─────size─┬──nmalloc─┬──ndalloc─┬─active_allocations─┬─allocated_bytes─┐ +│ 82 │ 1 │ 50331648 │ 1 │ 0 │ 1 │ 50331648 │ +│ 10 │ 0 │ 192 │ 512336 │ 370710 │ 141626 │ 27192192 │ +│ 69 │ 1 │ 5242880 │ 6 │ 2 │ 4 │ 20971520 │ +│ 3 │ 0 │ 48 │ 16938224 │ 16559484 │ 378740 │ 18179520 │ +│ 28 │ 0 │ 4096 │ 122924 │ 119142 │ 3782 │ 15491072 │ +│ 61 │ 1 │ 1310720 │ 44569 │ 44558 │ 11 │ 14417920 │ +│ 39 │ 1 │ 28672 │ 1285 │ 913 │ 372 │ 10665984 │ +│ 4 │ 0 │ 64 │ 2837225 │ 2680568 │ 156657 │ 10026048 │ +│ 6 │ 0 │ 96 │ 2617803 │ 2531435 │ 86368 │ 8291328 │ +│ 36 │ 1 │ 16384 │ 22431 │ 21970 │ 461 │ 7553024 │ +└───────┴───────┴──────────┴──────────┴──────────┴────────────────────┴─────────────────┘ +``` From 8d5ddcbd3094182b44b3641f11acf6ba788faaf7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 5 Jul 2023 15:40:06 +0200 Subject: [PATCH 091/179] Remove coverity --- .github/workflows/nightly.yml | 45 ----------------------------------- 1 file changed, 45 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index cf61012f2bc1..9de0444bd837 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -75,51 +75,6 @@ jobs: Codebrowser: needs: [DockerHubPush] uses: ./.github/workflows/woboq.yml - BuilderCoverity: - needs: DockerHubPush - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - BUILD_NAME=coverity - CACHES_PATH=${{runner.temp}}/../ccaches - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - TEMP_PATH=${{runner.temp}}/build_check - EOF - echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV" - - name: Download changed images - uses: actions/download-artifact@v3 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - submodules: true - - name: Build - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload Coverity Analysis - if: ${{ success() || failure() }} - run: | - curl --form token="${COVERITY_TOKEN}" \ - --form email='security+coverity@clickhouse.com' \ - --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tar.gz" \ - --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \ - --form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \ - https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" SonarCloud: runs-on: [self-hosted, builder] env: From 1da413e64eaa092b2ab685253f4cb32a93dcc53e Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Wed, 5 Jul 2023 14:56:11 +0000 Subject: [PATCH 092/179] fix segfault when create invalid EmbeddedRocksdb table --- src/Storages/checkAndGetLiteralArgument.cpp | 10 +++++++++- .../02811_invalid_embedded_rocksdb_create.reference | 0 .../02811_invalid_embedded_rocksdb_create.sql | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 
tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.reference create mode 100644 tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql diff --git a/src/Storages/checkAndGetLiteralArgument.cpp b/src/Storages/checkAndGetLiteralArgument.cpp index 1aa942548a75..78ec1e55b647 100644 --- a/src/Storages/checkAndGetLiteralArgument.cpp +++ b/src/Storages/checkAndGetLiteralArgument.cpp @@ -12,7 +12,15 @@ namespace ErrorCodes template T checkAndGetLiteralArgument(const ASTPtr & arg, const String & arg_name) { - return checkAndGetLiteralArgument(*arg->as(), arg_name); + if (arg->as()) + return checkAndGetLiteralArgument(*arg->as(), arg_name); + + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Argument '{}' must be a literal, get {} (value: {})", + arg_name, + arg->getID(), + arg->formatForErrorMessage()); } template diff --git a/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.reference b/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql b/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql new file mode 100644 index 000000000000..aac2652fbfa8 --- /dev/null +++ b/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql @@ -0,0 +1 @@ +CREATE TABLE dict (`k` String, `v` String) ENGINE = EmbeddedRocksDB(k) PRIMARY KEY k; -- {serverError 36} \ No newline at end of file From 8f1ed5c90de4ada3764ea6384220459359eb7950 Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Wed, 5 Jul 2023 15:04:38 +0000 Subject: [PATCH 093/179] add more check + line break --- src/Storages/checkAndGetLiteralArgument.cpp | 6 +++--- .../0_stateless/02811_invalid_embedded_rocksdb_create.sql | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Storages/checkAndGetLiteralArgument.cpp b/src/Storages/checkAndGetLiteralArgument.cpp index 78ec1e55b647..5baf47fe91a9 100644 --- a/src/Storages/checkAndGetLiteralArgument.cpp +++ b/src/Storages/checkAndGetLiteralArgument.cpp @@ -12,15 +12,15 @@ namespace ErrorCodes template T checkAndGetLiteralArgument(const ASTPtr & arg, const String & arg_name) { - if (arg->as()) + if (arg && arg->as()) return checkAndGetLiteralArgument(*arg->as(), arg_name); throw Exception( ErrorCodes::BAD_ARGUMENTS, "Argument '{}' must be a literal, get {} (value: {})", arg_name, - arg->getID(), - arg->formatForErrorMessage()); + arg ? arg->getID() : "NULL", + arg ? 
arg->formatForErrorMessage() : "NULL"); } template diff --git a/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql b/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql index aac2652fbfa8..bfe4ee0622e0 100644 --- a/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql +++ b/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql @@ -1 +1 @@ -CREATE TABLE dict (`k` String, `v` String) ENGINE = EmbeddedRocksDB(k) PRIMARY KEY k; -- {serverError 36} \ No newline at end of file +CREATE TABLE dict (`k` String, `v` String) ENGINE = EmbeddedRocksDB(k) PRIMARY KEY k; -- {serverError 36} From ce8b0cae822f7e049eba7e8967122890510a82c5 Mon Sep 17 00:00:00 2001 From: lcjh <120989324@qq.com> Date: Thu, 6 Jul 2023 02:14:48 +0800 Subject: [PATCH 094/179] remove duplicate condition --- src/Functions/FunctionUnixTimestamp64.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Functions/FunctionUnixTimestamp64.h b/src/Functions/FunctionUnixTimestamp64.h index 58a23f7266ee..a20654655014 100644 --- a/src/Functions/FunctionUnixTimestamp64.h +++ b/src/Functions/FunctionUnixTimestamp64.h @@ -155,7 +155,6 @@ class FunctionFromUnixTimestamp64 : public IFunction if (!((executeType(result_column, arguments, input_rows_count)) || (executeType(result_column, arguments, input_rows_count)) || (executeType(result_column, arguments, input_rows_count)) - || (executeType(result_column, arguments, input_rows_count)) || (executeType(result_column, arguments, input_rows_count)) || (executeType(result_column, arguments, input_rows_count)) || (executeType(result_column, arguments, input_rows_count)) From 698c49cd51f406d0a9e619b4c7d971857f1fb59b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 02:19:03 +0300 Subject: [PATCH 095/179] Update 02811_invalid_embedded_rocksdb_create.sql --- .../0_stateless/02811_invalid_embedded_rocksdb_create.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql b/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql index bfe4ee0622e0..a87ac5e0de08 100644 --- a/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql +++ b/tests/queries/0_stateless/02811_invalid_embedded_rocksdb_create.sql @@ -1 +1,2 @@ +-- Tags: no-fasttest CREATE TABLE dict (`k` String, `v` String) ENGINE = EmbeddedRocksDB(k) PRIMARY KEY k; -- {serverError 36} From 75d051dd5554022ee7d9c215543c5ffad5c3df63 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 01:49:53 +0200 Subject: [PATCH 096/179] Remove useless packages --- docker/test/sqllogic/run.sh | 4 ++-- docker/test/stateless/Dockerfile | 1 - docker/test/stress/Dockerfile | 3 --- docker/test/upgrade/Dockerfile | 3 --- docker/test/util/Dockerfile | 1 - docs/zh/development/build.md | 7 ------- .../0_stateless/02439_merge_selecting_partitions.sql | 1 - 7 files changed, 2 insertions(+), 18 deletions(-) diff --git a/docker/test/sqllogic/run.sh b/docker/test/sqllogic/run.sh index 8d0252e3c989..444252837a3c 100755 --- a/docker/test/sqllogic/run.sh +++ b/docker/test/sqllogic/run.sh @@ -92,8 +92,8 @@ sudo clickhouse stop ||: for _ in $(seq 1 60); do if [[ $(wget --timeout=1 -q 'localhost:8123' -O-) == 'Ok.' 
]]; then sleep 1 ; else break; fi ; done -grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||: -pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz & +rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||: +zstd < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst & # Compressed (FIXME: remove once only github actions will be left) rm /var/log/clickhouse-server/clickhouse-server.log diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 32996140521b..e1e84c427ba7 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -33,7 +33,6 @@ RUN apt-get update -y \ qemu-user-static \ sqlite3 \ sudo \ - telnet \ tree \ unixodbc \ wget \ diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile index e9712f430fd2..eddeb04758ba 100644 --- a/docker/test/stress/Dockerfile +++ b/docker/test/stress/Dockerfile @@ -8,8 +8,6 @@ RUN apt-get update -y \ apt-get install --yes --no-install-recommends \ bash \ tzdata \ - fakeroot \ - debhelper \ parallel \ expect \ python3 \ @@ -20,7 +18,6 @@ RUN apt-get update -y \ sudo \ openssl \ netcat-openbsd \ - telnet \ brotli \ && apt-get clean diff --git a/docker/test/upgrade/Dockerfile b/docker/test/upgrade/Dockerfile index 8e5890b81a0c..9152230af1cf 100644 --- a/docker/test/upgrade/Dockerfile +++ b/docker/test/upgrade/Dockerfile @@ -8,8 +8,6 @@ RUN apt-get update -y \ apt-get install --yes --no-install-recommends \ bash \ tzdata \ - fakeroot \ - debhelper \ parallel \ expect \ python3 \ @@ -20,7 +18,6 @@ RUN apt-get update -y \ sudo \ openssl \ netcat-openbsd \ - telnet \ brotli \ && apt-get clean diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index 85e888f1df7d..6a4c6aa30575 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -44,7 +44,6 @@ RUN apt-get update \ clang-${LLVM_VERSION} \ clang-tidy-${LLVM_VERSION} \ cmake \ - fakeroot \ gdb \ git \ gperf \ diff --git a/docs/zh/development/build.md b/docs/zh/development/build.md index d76f4b1577c2..bb25755a615c 100644 --- a/docs/zh/development/build.md +++ b/docs/zh/development/build.md @@ -3,13 +3,6 @@ slug: /zh/development/build --- # 如何构建 ClickHouse 发布包 {#ru-he-gou-jian-clickhouse-fa-bu-bao} -## 安装 Git 和 Pbuilder {#an-zhuang-git-he-pbuilder} - -``` bash -sudo apt-get update -sudo apt-get install git pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring -``` - ## 拉取 ClickHouse 源码 {#la-qu-clickhouse-yuan-ma} ``` bash diff --git a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql index 88ce2834d6b5..bcfcaa2acd34 100644 --- a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql +++ b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql @@ -1,4 +1,3 @@ - drop table if exists rmt; create table rmt (n int, m int) engine=ReplicatedMergeTree('/test/02439/{shard}/{database}', '{replica}') partition by n order by n; From db14b2c54fbd42d1c8123a15d87382fe00938a6a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 02:16:38 +0200 Subject: [PATCH 097/179] Remove useless logs --- src/Interpreters/executeQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index c52dab722c97..694226af6b0f 100644 --- a/src/Interpreters/executeQuery.cpp +++ 
b/src/Interpreters/executeQuery.cpp @@ -322,8 +322,8 @@ static std::tuple executeQueryImpl( /// This does not have impact on the final span logs, because these internal queries are issued by external queries, /// we still have enough span logs for the execution of external queries. std::shared_ptr query_span = internal ? nullptr : std::make_shared("query"); - if (query_span) - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "Query span trace_id for opentelemetry log: {}", query_span->trace_id); + if (query_span && query_span->trace_id != UUID{}) + LOG_TRACE(&Poco::Logger::get("executeQuery"), "Query span trace_id for opentelemetry log: {}", query_span->trace_id); auto query_start_time = std::chrono::system_clock::now(); From 5416b7b6df8104440d9d74cbdc68fd0505012654 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 03:04:58 +0200 Subject: [PATCH 098/179] Fix incorrect log level = warning --- programs/server/Server.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index d2d8a0d07fb7..686c3b90dd60 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1146,7 +1146,16 @@ try size_t merges_mutations_memory_usage_soft_limit = server_settings_.merges_mutations_memory_usage_soft_limit; size_t default_merges_mutations_server_memory_usage = static_cast(memory_amount * server_settings_.merges_mutations_memory_usage_to_ram_ratio); - if (merges_mutations_memory_usage_soft_limit == 0 || merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage) + if (merges_mutations_memory_usage_soft_limit == 0) + { + merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage; + LOG_INFO(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}" + " ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)", + formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit), + formatReadableSizeWithBinarySuffix(memory_amount), + server_settings_.merges_mutations_memory_usage_to_ram_ratio); + } + else if (merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage) { merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage; LOG_WARNING(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}" From 64d5a85f6e731d9e8baba170aa7441555c030545 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 03:16:06 +0200 Subject: [PATCH 099/179] Fix test_replicated_table_attach --- tests/integration/test_replicated_table_attach/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_replicated_table_attach/test.py b/tests/integration/test_replicated_table_attach/test.py index 2d209ddaf790..dee2be3fcf72 100644 --- a/tests/integration/test_replicated_table_attach/test.py +++ b/tests/integration/test_replicated_table_attach/test.py @@ -54,7 +54,7 @@ def assert_values(): assert_values() with PartitionManager() as pm: pm.drop_instance_zk_connections(node) - node.restart_clickhouse(stop_start_wait_sec=20) + node.restart_clickhouse(stop_start_wait_sec=300) assert_values() # check that we activate it in the end From e2c9f86f39e83b128d0fc82628bdae2ab0b8080b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 03:31:10 +0200 Subject: [PATCH 100/179] Better usability of a test --- tests/queries/0_stateless/02125_many_mutations.sh | 2 ++ tests/queries/0_stateless/02125_many_mutations_2.sh | 
2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/02125_many_mutations.sh b/tests/queries/0_stateless/02125_many_mutations.sh index b42d5bb15d3e..54948fa1048f 100755 --- a/tests/queries/0_stateless/02125_many_mutations.sh +++ b/tests/queries/0_stateless/02125_many_mutations.sh @@ -7,6 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # "max_parts_to_merge_at_once = 1" prevents merges to start in background before our own OPTIMIZE FINAL +$CLICKHOUSE_CLIENT -q "drop table if exists many_mutations" $CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1" $CLICKHOUSE_CLIENT -q "insert into many_mutations values (0, 0), (1, 1)" $CLICKHOUSE_CLIENT -q "system stop merges many_mutations" @@ -49,3 +50,4 @@ $CLICKHOUSE_CLIENT -q "system start merges many_mutations" $CLICKHOUSE_CLIENT -q "optimize table many_mutations final" --optimize_throw_if_noop 1 $CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" $CLICKHOUSE_CLIENT -q "select x, y from many_mutations order by x" +$CLICKHOUSE_CLIENT -q "drop table many_mutations" diff --git a/tests/queries/0_stateless/02125_many_mutations_2.sh b/tests/queries/0_stateless/02125_many_mutations_2.sh index e5e3070a9440..0351538b2105 100755 --- a/tests/queries/0_stateless/02125_many_mutations_2.sh +++ b/tests/queries/0_stateless/02125_many_mutations_2.sh @@ -7,6 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # "max_parts_to_merge_at_once = 1" prevents merges to start in background before our own OPTIMIZE FINAL +$CLICKHOUSE_CLIENT -q "drop table if exists many_mutations" $CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1" $CLICKHOUSE_CLIENT -q "insert into many_mutations select number, number + 1 from numbers(2000)" $CLICKHOUSE_CLIENT -q "system stop merges many_mutations" @@ -51,3 +52,4 @@ $CLICKHOUSE_CLIENT -q "system flush logs" $CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" $CLICKHOUSE_CLIENT -q "select count() from many_mutations" $CLICKHOUSE_CLIENT -q "select * from system.part_log where database = currentDatabase() and table == 'many_mutations' and peak_memory_usage > 1e9" +$CLICKHOUSE_CLIENT -q "drop table many_mutations" From 38c163b0662249b4da83e8b812662bf5b6d1a27a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 03:43:59 +0200 Subject: [PATCH 101/179] Improve test --- .../0_stateless/02125_many_mutations.sh | 32 +++++++++---------- .../0_stateless/02125_many_mutations_2.sh | 32 +++++++++++-------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/tests/queries/0_stateless/02125_many_mutations.sh b/tests/queries/0_stateless/02125_many_mutations.sh index 54948fa1048f..5a139e8b01de 100755 --- a/tests/queries/0_stateless/02125_many_mutations.sh +++ b/tests/queries/0_stateless/02125_many_mutations.sh @@ -7,19 +7,17 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # "max_parts_to_merge_at_once = 1" prevents merges to start in background before our own OPTIMIZE FINAL -$CLICKHOUSE_CLIENT -q "drop table if exists many_mutations" -$CLICKHOUSE_CLIENT -q "create table many_mutations (x 
UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1" -$CLICKHOUSE_CLIENT -q "insert into many_mutations values (0, 0), (1, 1)" -$CLICKHOUSE_CLIENT -q "system stop merges many_mutations" - -$CLICKHOUSE_CLIENT -q "select x, y from many_mutations order by x" +$CLICKHOUSE_CLIENT --multiquery -q " +drop table if exists many_mutations; +create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1; +insert into many_mutations values (0, 0), (1, 1); +system stop merges many_mutations; +select x, y from many_mutations order by x; +" job() { - for _ in {1..1000} - do - $CLICKHOUSE_CLIENT -q "alter table many_mutations update y = y + 1 where 1" - done + yes "alter table many_mutations update y = y + 1 where 1;" | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery } job & @@ -45,9 +43,11 @@ job & wait -$CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" -$CLICKHOUSE_CLIENT -q "system start merges many_mutations" -$CLICKHOUSE_CLIENT -q "optimize table many_mutations final" --optimize_throw_if_noop 1 -$CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" -$CLICKHOUSE_CLIENT -q "select x, y from many_mutations order by x" -$CLICKHOUSE_CLIENT -q "drop table many_mutations" +$CLICKHOUSE_CLIENT --multiquery -q " +select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done; +system start merges many_mutations; +optimize table many_mutations final SETTINGS optimize_throw_if_noop = 1; +select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done; +select x, y from many_mutations order by x; +drop table many_mutations; +" diff --git a/tests/queries/0_stateless/02125_many_mutations_2.sh b/tests/queries/0_stateless/02125_many_mutations_2.sh index 0351538b2105..5b779c1b276d 100755 --- a/tests/queries/0_stateless/02125_many_mutations_2.sh +++ b/tests/queries/0_stateless/02125_many_mutations_2.sh @@ -7,10 +7,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # "max_parts_to_merge_at_once = 1" prevents merges to start in background before our own OPTIMIZE FINAL -$CLICKHOUSE_CLIENT -q "drop table if exists many_mutations" -$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1" -$CLICKHOUSE_CLIENT -q "insert into many_mutations select number, number + 1 from numbers(2000)" -$CLICKHOUSE_CLIENT -q "system stop merges many_mutations" +$CLICKHOUSE_CLIENT --multiquery -q " +drop table if exists many_mutations; +create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1; +insert into many_mutations select number, number + 1 from numbers(2000); +system stop merges many_mutations; +" $CLICKHOUSE_CLIENT -q "select count() from many_mutations" @@ -18,8 +20,8 @@ job() { for i in {1..1000} do - $CLICKHOUSE_CLIENT -q "alter table many_mutations delete where y = ${i} * 2 settings mutations_sync=0" - done + echo "alter table many_mutations 
delete where y = ${i} * 2 settings mutations_sync = 0;" + done | $CLICKHOUSE_CLIENT --multiquery } job & @@ -45,11 +47,13 @@ job & wait -$CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" -$CLICKHOUSE_CLIENT -q "system start merges many_mutations" -$CLICKHOUSE_CLIENT -q "optimize table many_mutations final" --optimize_throw_if_noop 1 -$CLICKHOUSE_CLIENT -q "system flush logs" -$CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" -$CLICKHOUSE_CLIENT -q "select count() from many_mutations" -$CLICKHOUSE_CLIENT -q "select * from system.part_log where database = currentDatabase() and table == 'many_mutations' and peak_memory_usage > 1e9" -$CLICKHOUSE_CLIENT -q "drop table many_mutations" +$CLICKHOUSE_CLIENT --multiquery -q " +select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done; +system start merges many_mutations; +optimize table many_mutations final SETTINGS optimize_throw_if_noop = 1; +system flush logs; +select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done; +select count() from many_mutations; +select * from system.part_log where database = currentDatabase() and table == 'many_mutations' and peak_memory_usage > 1e9; +drop table many_mutations; +" From d59f68b6009467e891b96e0725ec308aad236c63 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 04:55:46 +0200 Subject: [PATCH 102/179] Remove useless code --- src/Access/Common/AccessType.h | 1 - src/Common/SymbolIndex.cpp | 1 - src/Interpreters/InterpreterSystemQuery.cpp | 15 --------------- src/Parsers/ASTSystemQuery.h | 1 - .../0_stateless/01271_show_privileges.reference | 1 - .../02117_show_create_table_system.reference | 6 +++--- 6 files changed, 3 insertions(+), 22 deletions(-) diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h index f65a77c1d6af..c06bceb87e36 100644 --- a/src/Access/Common/AccessType.h +++ b/src/Access/Common/AccessType.h @@ -157,7 +157,6 @@ enum class AccessType M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \ M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_USERS, "RELOAD USERS", GLOBAL, SYSTEM_RELOAD) \ - M(SYSTEM_RELOAD_SYMBOLS, "RELOAD SYMBOLS", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_MODEL, "SYSTEM RELOAD MODELS, RELOAD MODEL, RELOAD MODELS", GLOBAL, SYSTEM_RELOAD) \ M(SYSTEM_RELOAD_FUNCTION, "SYSTEM RELOAD FUNCTIONS, RELOAD FUNCTION, RELOAD FUNCTIONS", GLOBAL, SYSTEM_RELOAD) \ diff --git a/src/Common/SymbolIndex.cpp b/src/Common/SymbolIndex.cpp index f1cace5017c2..b4ae16670d83 100644 --- a/src/Common/SymbolIndex.cpp +++ b/src/Common/SymbolIndex.cpp @@ -9,7 +9,6 @@ #include -//#include #include #include diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index e1ff8676bc77..c74ff0624711 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -470,16 +470,6 @@ BlockIO InterpreterSystemQuery::execute() getContext()->checkAccess(AccessType::SYSTEM_RELOAD_USERS); system_context->getAccessControl().reload(AccessControl::ReloadMode::ALL); break; - case Type::RELOAD_SYMBOLS: - { -#if defined(__ELF__) && 
!defined(OS_FREEBSD) - getContext()->checkAccess(AccessType::SYSTEM_RELOAD_SYMBOLS); - SymbolIndex::reload(); - break; -#else - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "SYSTEM RELOAD SYMBOLS is not supported on current platform"); -#endif - } case Type::STOP_MERGES: startStopAction(ActionLocks::PartsMerge, false); break; @@ -1056,11 +1046,6 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() required_access.emplace_back(AccessType::SYSTEM_RELOAD_USERS); break; } - case Type::RELOAD_SYMBOLS: - { - required_access.emplace_back(AccessType::SYSTEM_RELOAD_SYMBOLS); - break; - } case Type::STOP_MERGES: case Type::START_MERGES: { diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index ebc3e9cd4308..528fbdce2c26 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -56,7 +56,6 @@ class ASTSystemQuery : public IAST, public ASTQueryWithOnCluster RELOAD_EMBEDDED_DICTIONARIES, RELOAD_CONFIG, RELOAD_USERS, - RELOAD_SYMBOLS, RESTART_DISK, STOP_MERGES, START_MERGES, diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 9e6249bfcb39..f3c07cf11a7d 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -108,7 +108,6 @@ SYSTEM DROP S3 CLIENT CACHE ['SYSTEM DROP S3 CLIENT','DROP S3 CLIENT CACHE'] GLO SYSTEM DROP CACHE ['DROP CACHE'] \N SYSTEM SYSTEM RELOAD CONFIG ['RELOAD CONFIG'] GLOBAL SYSTEM RELOAD SYSTEM RELOAD USERS ['RELOAD USERS'] GLOBAL SYSTEM RELOAD -SYSTEM RELOAD SYMBOLS ['RELOAD SYMBOLS'] GLOBAL SYSTEM RELOAD SYSTEM RELOAD DICTIONARY ['SYSTEM RELOAD DICTIONARIES','RELOAD DICTIONARY','RELOAD DICTIONARIES'] GLOBAL SYSTEM RELOAD SYSTEM RELOAD MODEL ['SYSTEM RELOAD MODELS','RELOAD MODEL','RELOAD MODELS'] GLOBAL SYSTEM RELOAD SYSTEM RELOAD FUNCTION ['SYSTEM RELOAD FUNCTIONS','RELOAD FUNCTION','RELOAD FUNCTIONS'] GLOBAL SYSTEM RELOAD diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 3834b05601fa..c7aded81ac6d 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -297,7 +297,7 @@ CREATE TABLE system.grants ( `user_name` Nullable(String), `role_name` Nullable(String), - `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER 
TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'AZURE' = 161, 'SOURCES' = 162, 'CLUSTER' = 163, 'ALL' = 164, 'NONE' = 165), + `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' 
= 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD DICTIONARY' = 110, 'SYSTEM RELOAD MODEL' = 111, 'SYSTEM RELOAD FUNCTION' = 112, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 113, 'SYSTEM RELOAD' = 114, 'SYSTEM RESTART DISK' = 115, 'SYSTEM MERGES' = 116, 'SYSTEM TTL MERGES' = 117, 'SYSTEM FETCHES' = 118, 'SYSTEM MOVES' = 119, 'SYSTEM DISTRIBUTED SENDS' = 120, 'SYSTEM REPLICATED SENDS' = 121, 'SYSTEM SENDS' = 122, 'SYSTEM REPLICATION QUEUES' = 123, 'SYSTEM DROP REPLICA' = 124, 'SYSTEM SYNC REPLICA' = 125, 'SYSTEM RESTART REPLICA' = 126, 'SYSTEM RESTORE REPLICA' = 127, 'SYSTEM WAIT LOADING PARTS' = 128, 'SYSTEM SYNC DATABASE REPLICA' = 129, 'SYSTEM SYNC TRANSACTION LOG' = 130, 'SYSTEM SYNC FILE CACHE' = 131, 'SYSTEM FLUSH DISTRIBUTED' = 132, 'SYSTEM FLUSH LOGS' = 133, 'SYSTEM FLUSH' = 134, 'SYSTEM THREAD FUZZER' = 135, 'SYSTEM UNFREEZE' = 136, 'SYSTEM FAILPOINT' = 137, 'SYSTEM' = 138, 'dictGet' = 139, 'displaySecretsInShowAndSelect' = 140, 'addressToLine' = 141, 'addressToLineWithInlines' = 142, 'addressToSymbol' = 143, 'demangle' = 144, 'INTROSPECTION' = 145, 'FILE' = 146, 'URL' = 147, 'REMOTE' = 148, 'MONGO' = 149, 'REDIS' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 
'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'AZURE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164), `database` Nullable(String), `table` Nullable(String), `column` Nullable(String), @@ -582,10 +582,10 @@ ENGINE = SystemPartsColumns COMMENT 'SYSTEM TABLE is built on the fly.' CREATE TABLE system.privileges ( - `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 
123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'AZURE' = 161, 'SOURCES' = 162, 'CLUSTER' = 163, 'ALL' = 164, 'NONE' = 165), + `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED 
EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD DICTIONARY' = 110, 'SYSTEM RELOAD MODEL' = 111, 'SYSTEM RELOAD FUNCTION' = 112, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 113, 'SYSTEM RELOAD' = 114, 'SYSTEM RESTART DISK' = 115, 'SYSTEM MERGES' = 116, 'SYSTEM TTL MERGES' = 117, 'SYSTEM FETCHES' = 118, 'SYSTEM MOVES' = 119, 'SYSTEM DISTRIBUTED SENDS' = 120, 'SYSTEM REPLICATED SENDS' = 121, 'SYSTEM SENDS' = 122, 'SYSTEM REPLICATION QUEUES' = 123, 'SYSTEM DROP REPLICA' = 124, 'SYSTEM SYNC REPLICA' = 125, 'SYSTEM RESTART REPLICA' = 126, 'SYSTEM RESTORE REPLICA' = 127, 'SYSTEM WAIT LOADING PARTS' = 128, 'SYSTEM SYNC DATABASE REPLICA' = 129, 'SYSTEM SYNC TRANSACTION LOG' = 130, 'SYSTEM SYNC FILE CACHE' = 131, 'SYSTEM FLUSH DISTRIBUTED' = 132, 'SYSTEM FLUSH LOGS' = 133, 'SYSTEM FLUSH' = 134, 'SYSTEM THREAD FUZZER' = 135, 'SYSTEM UNFREEZE' = 136, 'SYSTEM FAILPOINT' = 137, 'SYSTEM' = 138, 'dictGet' = 139, 'displaySecretsInShowAndSelect' = 140, 'addressToLine' = 141, 'addressToLineWithInlines' = 142, 'addressToSymbol' = 143, 'demangle' = 144, 'INTROSPECTION' = 145, 'FILE' = 146, 'URL' = 147, 'REMOTE' = 148, 'MONGO' = 149, 'REDIS' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'AZURE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164), `aliases` Array(String), `level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5, 'NAMED_COLLECTION' = 6)), - `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 
74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'AZURE' = 161, 'SOURCES' = 162, 'CLUSTER' = 163, 'ALL' = 164, 'NONE' = 165)) + `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 
'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD DICTIONARY' = 110, 'SYSTEM RELOAD MODEL' = 111, 'SYSTEM RELOAD FUNCTION' = 112, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 113, 'SYSTEM RELOAD' = 114, 'SYSTEM RESTART DISK' = 115, 'SYSTEM MERGES' = 116, 'SYSTEM TTL MERGES' = 117, 'SYSTEM FETCHES' = 118, 'SYSTEM MOVES' = 119, 'SYSTEM DISTRIBUTED SENDS' = 120, 'SYSTEM REPLICATED SENDS' = 121, 'SYSTEM SENDS' = 122, 'SYSTEM REPLICATION QUEUES' = 123, 'SYSTEM DROP REPLICA' = 124, 'SYSTEM SYNC REPLICA' = 125, 'SYSTEM RESTART REPLICA' = 126, 'SYSTEM RESTORE REPLICA' = 127, 'SYSTEM WAIT LOADING PARTS' = 128, 'SYSTEM SYNC DATABASE REPLICA' = 129, 'SYSTEM SYNC TRANSACTION LOG' = 130, 'SYSTEM SYNC FILE CACHE' = 131, 'SYSTEM FLUSH DISTRIBUTED' = 132, 'SYSTEM FLUSH LOGS' = 133, 'SYSTEM FLUSH' = 134, 'SYSTEM THREAD FUZZER' = 135, 'SYSTEM UNFREEZE' = 136, 'SYSTEM FAILPOINT' = 137, 'SYSTEM' = 138, 'dictGet' = 139, 'displaySecretsInShowAndSelect' = 140, 'addressToLine' = 141, 'addressToLineWithInlines' = 142, 'addressToSymbol' = 143, 'demangle' = 144, 'INTROSPECTION' = 145, 'FILE' = 146, 'URL' = 147, 'REMOTE' = 148, 'MONGO' = 149, 'REDIS' = 150, 'MEILISEARCH' = 151, 'MYSQL' = 152, 'POSTGRES' = 153, 'SQLITE' = 154, 'ODBC' = 155, 'JDBC' = 156, 'HDFS' = 157, 'S3' = 158, 'HIVE' = 159, 'AZURE' = 160, 'SOURCES' = 161, 'CLUSTER' = 162, 'ALL' = 163, 'NONE' = 164)) ) ENGINE = SystemPrivileges COMMENT 'SYSTEM TABLE is built on the fly.' 
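For anyone cross-checking the renumbered privilege enum above, it is easier to query `system.privileges` on a running build than to diff the reference file by hand. A minimal sketch, assuming the enum column is named `privilege` (its name is truncated out of the excerpt above); `aliases` and `parent_group` come from the definition shown:

```sql
-- Illustrative check, not part of the patch: confirm 'SYSTEM RELOAD SYMBOLS' is gone
-- and see how the remaining SYSTEM RELOAD privileges are grouped after renumbering.
SELECT privilege, aliases, parent_group
FROM system.privileges
WHERE toString(privilege) LIKE 'SYSTEM RELOAD%'
ORDER BY privilege;
```

The result should no longer list 'SYSTEM RELOAD SYMBOLS', matching the updated expected output above.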
From e8718e04cb2cfed00365f6e75c2c4e5bf2baa925 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 6 Jul 2023 05:58:05 +0300 Subject: [PATCH 103/179] Update --- src/Common/SymbolIndex.cpp | 7 ------- src/Common/SymbolIndex.h | 1 - 2 files changed, 8 deletions(-) diff --git a/src/Common/SymbolIndex.cpp b/src/Common/SymbolIndex.cpp index b4ae16670d83..4c7f3827125c 100644 --- a/src/Common/SymbolIndex.cpp +++ b/src/Common/SymbolIndex.cpp @@ -560,13 +560,6 @@ MultiVersion::Version SymbolIndex::instance() return instanceImpl().get(); } -void SymbolIndex::reload() -{ - instanceImpl().set(std::unique_ptr(new SymbolIndex)); - /// Also drop stacktrace cache. - StackTrace::dropCache(); -} - } #endif diff --git a/src/Common/SymbolIndex.h b/src/Common/SymbolIndex.h index 471623319468..773f59b7914a 100644 --- a/src/Common/SymbolIndex.h +++ b/src/Common/SymbolIndex.h @@ -24,7 +24,6 @@ class SymbolIndex : private boost::noncopyable public: static MultiVersion::Version instance(); - static void reload(); struct Symbol { From d8e87f6c1df5c113fdd6026466caf8fccebd5150 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Fri, 30 Jun 2023 14:48:20 +0800 Subject: [PATCH 104/179] Make common macros extendable --- src/Common/CurrentMetrics.cpp | 10 ++++++++-- src/Common/ErrorCodes.cpp | 8 +++++++- src/Common/ProfileEvents.cpp | 7 ++++++- src/Common/StatusInfo.cpp | 7 ++++++- 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 105a7c0548f8..4f0d55a9cb66 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -2,7 +2,7 @@ /// Available metrics. Add something here as you wish. -#define APPLY_FOR_METRICS(M) \ +#define APPLY_FOR_BUILTIN_METRICS(M) \ M(Query, "Number of executing queries") \ M(Merge, "Number of executing background merges") \ M(Move, "Number of currently executing moves") \ @@ -200,7 +200,13 @@ M(MergeTreeReadTaskRequestsSent, "The current number of callback requests in flight from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \ M(MergeTreeAllRangesAnnouncementsSent, "The current number of announcement being sent in flight from the remote server to the initiator server about the set of data parts (for MergeTree tables). 
Measured on the remote server side.") \ M(CreatedTimersInQueryProfiler, "Number of Created thread local timers in QueryProfiler") \ - M(ActiveTimersInQueryProfiler, "Number of Active thread local timers in QueryProfiler") + M(ActiveTimersInQueryProfiler, "Number of Active thread local timers in QueryProfiler") \ + +#ifdef APPLY_FOR_EXTERNAL_METRICS + #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) +#else + #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) +#endif namespace CurrentMetrics { diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 4c08d762df2d..87619cdafad3 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -13,7 +13,7 @@ * - system.errors table */ -#define APPLY_FOR_ERROR_CODES(M) \ +#define APPLY_FOR_BUILTIN_ERROR_CODES(M) \ M(0, OK) \ M(1, UNSUPPORTED_METHOD) \ M(2, UNSUPPORTED_PARAMETER) \ @@ -589,6 +589,12 @@ M(1002, UNKNOWN_EXCEPTION) \ /* See END */ +#ifdef APPLY_FOR_EXTERNAL_ERROR_CODES + #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M) APPLY_FOR_EXTERNAL_ERROR_CODES(M) +#else + #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M) +#endif + namespace DB { namespace ErrorCodes diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 0838e0366df1..ecec11798752 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -4,7 +4,7 @@ /// Available events. Add something here as you wish. -#define APPLY_FOR_EVENTS(M) \ +#define APPLY_FOR_BUILTIN_EVENTS(M) \ M(Query, "Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \ M(SelectQuery, "Same as Query, but only for SELECT queries.") \ M(InsertQuery, "Same as Query, but only for INSERT queries.") \ @@ -536,6 +536,11 @@ The server successfully detected this situation and will download merged part fr M(LogError, "Number of log messages with level Error") \ M(LogFatal, "Number of log messages with level Fatal") \ +#ifdef APPLY_FOR_EXTERNAL_EVENTS + #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) APPLY_FOR_EXTERNAL_EVENTS(M) +#else + #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) +#endif namespace ProfileEvents { diff --git a/src/Common/StatusInfo.cpp b/src/Common/StatusInfo.cpp index 1f9ddfaf4b9e..07828cd0aaf0 100644 --- a/src/Common/StatusInfo.cpp +++ b/src/Common/StatusInfo.cpp @@ -2,9 +2,14 @@ #include /// Available status. Add something here as you wish. 
-#define APPLY_FOR_STATUS(M) \ +#define APPLY_FOR_BUILTIN_STATUS(M) \ M(DictionaryStatus, "Dictionary Status.", DB::getStatusEnumAllPossibleValues()) \ +#ifdef APPLY_FOR_EXTERNAL_STATUS + #define APPLY_FOR_STATUS(M) APPLY_FOR_BUILTIN_STATUS(M) APPLY_FOR_EXTERNAL_STATUS(M) +#else + #define APPLY_FOR_STATUS(M) APPLY_FOR_BUILTIN_STATUS(M) +#endif namespace CurrentStatusInfo { From 06553452ed1135a74f00ba9bb177e7c57954ea77 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Fri, 30 Jun 2023 22:33:26 +0800 Subject: [PATCH 105/179] StatusInfo will be deprecated --- src/Common/StatusInfo.cpp | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/Common/StatusInfo.cpp b/src/Common/StatusInfo.cpp index 07828cd0aaf0..1f9ddfaf4b9e 100644 --- a/src/Common/StatusInfo.cpp +++ b/src/Common/StatusInfo.cpp @@ -2,14 +2,9 @@ #include /// Available status. Add something here as you wish. -#define APPLY_FOR_BUILTIN_STATUS(M) \ +#define APPLY_FOR_STATUS(M) \ M(DictionaryStatus, "Dictionary Status.", DB::getStatusEnumAllPossibleValues()) \ -#ifdef APPLY_FOR_EXTERNAL_STATUS - #define APPLY_FOR_STATUS(M) APPLY_FOR_BUILTIN_STATUS(M) APPLY_FOR_EXTERNAL_STATUS(M) -#else - #define APPLY_FOR_STATUS(M) APPLY_FOR_BUILTIN_STATUS(M) -#endif namespace CurrentStatusInfo { From 5af28315e233561b196a1e05d5bb2d185288c747 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Fri, 30 Jun 2023 22:34:47 +0800 Subject: [PATCH 106/179] Try to fix style issues --- src/Common/CurrentMetrics.cpp | 4 ++-- src/Common/ErrorCodes.cpp | 4 ++-- src/Common/ProfileEvents.cpp | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 4f0d55a9cb66..8b88555d78ae 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -203,9 +203,9 @@ M(ActiveTimersInQueryProfiler, "Number of Active thread local timers in QueryProfiler") \ #ifdef APPLY_FOR_EXTERNAL_METRICS - #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) + #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) #else - #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) + #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) #endif namespace CurrentMetrics diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 87619cdafad3..ae8d5f8796d8 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -590,9 +590,9 @@ /* See END */ #ifdef APPLY_FOR_EXTERNAL_ERROR_CODES - #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M) APPLY_FOR_EXTERNAL_ERROR_CODES(M) + #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M) APPLY_FOR_EXTERNAL_ERROR_CODES(M) #else - #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M) + #define APPLY_FOR_ERROR_CODES(M) APPLY_FOR_BUILTIN_ERROR_CODES(M) #endif namespace DB diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index ecec11798752..c8570b7921b0 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -537,9 +537,9 @@ The server successfully detected this situation and will download merged part fr M(LogFatal, "Number of log messages with level Fatal") \ #ifdef APPLY_FOR_EXTERNAL_EVENTS - #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) APPLY_FOR_EXTERNAL_EVENTS(M) + #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) APPLY_FOR_EXTERNAL_EVENTS(M) #else - #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) + #define APPLY_FOR_EVENTS(M) 
APPLY_FOR_BUILTIN_EVENTS(M) #endif namespace ProfileEvents From 24b5c9c204dcc0f3c181d13528d46d012dae86c9 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 6 Jul 2023 06:05:43 +0000 Subject: [PATCH 107/179] Use one setting input_format_csv_allow_variable_number_of_colums and code in RowInput --- docs/en/interfaces/formats.md | 3 +- .../operations/settings/settings-formats.md | 10 +--- docs/ru/interfaces/formats.md | 3 +- docs/ru/operations/settings/settings.md | 10 +--- src/Core/Settings.h | 3 +- src/Formats/FormatFactory.cpp | 3 +- src/Formats/FormatSettings.h | 3 +- .../Formats/Impl/CSVRowInputFormat.cpp | 56 ++++++------------- .../Formats/Impl/CSVRowInputFormat.h | 6 +- .../RowInputFormatWithNamesAndTypes.cpp | 23 ++++++++ .../Formats/RowInputFormatWithNamesAndTypes.h | 4 ++ 11 files changed, 57 insertions(+), 67 deletions(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 79790cef5b22..34f9abb91d43 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -471,8 +471,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`. - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`. - [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`. -- [input_format_csv_ignore_extra_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_ignore_extra_columns) - ignore extra columns in CSV input (if file has more columns than expected). Default value - `false`. -- [input_format_csv_missing_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_csv_missing_as_default) - treat missing fields in CSV input as default values. Default value - `false`. +- [input_format_csv_allow_variable_number_of_colums](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_colums) - ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Default value - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 6b05f41666cc..43e410ceee8d 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -931,15 +931,9 @@ Result ```text " string " ``` -### input_format_csv_ignore_extra_columns {#input_format_csv_ignore_extra_columns} +### input_format_csv_allow_variable_number_of_colums {#input_format_csv_allow_variable_number_of_colums} -Ignore extra columns in CSV input (if file has more columns than expected). - -Disabled by default. - -### input_format_csv_missing_as_default {#input_format_csv_missing_as_default} - -Treat missing fields in CSV input as default values. +ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Disabled by default. 
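For reviewers of this commit, the merged setting is easiest to understand from a condensed version of the updated 00301_csv.sh test. This is an illustrative sketch only: it assumes the three-argument form of the `format` table function (explicit structure plus inline data) is available in this build, and it uses the setting name as spelled at this point in the series (`..._colums`; a later commit in the series corrects it to `..._columns`):

```sql
-- Sketch: the first CSV row carries an extra trailing field, the second omits the last column.
-- With the setting enabled, the extra field is ignored and the missing one takes the type's default.
SELECT *
FROM format(CSV, 'a String, b UInt64, c String',
$$"Hello",1,"String1","this extra field is dropped"
"Hello",2
$$)
SETTINGS input_format_csv_allow_variable_number_of_colums = 1;
```

The expected result is two rows, with `c` empty in the second one.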
diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 7e3bb3f7d266..e7c57fff7495 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -402,8 +402,7 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR - [input_format_csv_skip_first_lines](../operations/settings/settings.md#input_format_csv_skip_first_lines) - пропустить указанное количество строк в начале данных. Значение по умолчанию - `0`. - [input_format_csv_detect_header](../operations/settings/settings.md#input_format_csv_detect_header) - обнаружить заголовок с именами и типами в формате CSV. Значение по умолчанию - `true`. - [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - удалить пробелы и символы табуляции из строк без кавычек. Значение по умолчанию - `true`. -- [input_format_csv_ignore_extra_columns](../operations/settings/settings.md/#input_format_csv_ignore_extra_columns) - игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается). Значение по умолчанию - `false`. -- [input_format_csv_missing_as_default](../operations/settings/settings.md/#input_format_csv_missing_as_default) - рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. Значение по умолчанию - `false`. +- [input_format_csv_allow_variable_number_of_colums](../operations/settings/settings.md/#input_format_csv_allow_variable_number_of_colums) - игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается) и рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. Значение по умолчанию - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index e679ce6abe1a..ddc101c69913 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1727,15 +1727,9 @@ echo ' string ' | ./clickhouse local -q "select * from table FORMAT CSV" --in " string " ``` -## input_format_csv_ignore_extra_columns {#input_format_csv_ignore_extra_columns} +## input_format_csv_allow_variable_number_of_colums {#input_format_csv_allow_variable_number_of_colums} -Игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается). - -Выключено по умолчанию. - -## input_format_csv_missing_as_default {#input_format_csv_missing_as_default} - -Рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. +Игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается) и рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. Выключено по умолчанию. diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 044b3c34dc23..df2a916b7cf0 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1009,8 +1009,7 @@ class IColumn; M(Bool, regexp_dict_allow_hyperscan, true, "Allow regexp_tree dictionary using Hyperscan library.", 0) \ \ M(Bool, dictionary_use_async_executor, false, "Execute a pipeline for reading from a dictionary with several threads. 
It's supported only by DIRECT dictionary with CLICKHOUSE source.", 0) \ - M(Bool, input_format_csv_ignore_extra_columns, false, "Ignore extra columns in CSV input (if file has more columns than expected)", 0) \ - M(Bool, input_format_csv_missing_as_default, false, "Treat missing fields in CSV input as default values", 0) \ + M(Bool, input_format_csv_allow_variable_number_of_colums, false, "Ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values", 0) \ // End of FORMAT_FACTORY_SETTINGS // Please add settings non-related to formats into the COMMON_SETTINGS above. diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 04b095a92d6e..af9823dde73f 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -72,8 +72,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.skip_trailing_empty_lines = settings.input_format_csv_skip_trailing_empty_lines; format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces; format_settings.csv.allow_whitespace_or_tab_as_delimiter = settings.input_format_csv_allow_whitespace_or_tab_as_delimiter; - format_settings.csv.ignore_extra_columns = settings.input_format_csv_ignore_extra_columns; - format_settings.csv.missing_as_default = settings.input_format_csv_missing_as_default; + format_settings.csv.allow_variable_number_of_colums = settings.input_format_csv_allow_variable_number_of_colums; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 4bdc9077a0bb..653578f84962 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -140,8 +140,7 @@ struct FormatSettings bool skip_trailing_empty_lines = false; bool trim_whitespaces = true; bool allow_whitespace_or_tab_as_delimiter = false; - bool ignore_extra_columns = false; - bool missing_as_default = false; + bool allow_variable_number_of_colums = false; } csv; struct HiveText diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 9731b4ba4650..57e05ae7cd3d 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -155,18 +155,7 @@ CSVFormatReader::CSVFormatReader(PeekableReadBuffer & buf_, const FormatSettings void CSVFormatReader::skipFieldDelimiter() { skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); - - bool res = checkChar(format_settings.csv.delimiter, *buf); - if (res) - return; - - if (!format_settings.csv.missing_as_default) - { - char err[2] = {format_settings.csv.delimiter, '\0'}; - throwAtAssertionFailed(err, *buf); - } - else - current_row_has_missing_fields = true; + assertChar(format_settings.csv.delimiter, *buf); } template @@ -206,7 +195,6 @@ void CSVFormatReader::skipRowEndDelimiter() return; skipEndOfLine(*buf); - current_row_has_missing_fields = false; } void CSVFormatReader::skipHeaderRow() @@ -295,6 +283,11 @@ bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) return true; } +bool CSVFormatReader::allowVariableNumberOfColumns() +{ + return 
format_settings.csv.allow_variable_number_of_colums; +} + bool CSVFormatReader::readField( IColumn & column, const DataTypePtr & type, @@ -308,8 +301,6 @@ bool CSVFormatReader::readField( const bool at_delimiter = !buf->eof() && *buf->position() == format_settings.csv.delimiter; const bool at_last_column_line_end = is_last_file_column && (buf->eof() || *buf->position() == '\n' || *buf->position() == '\r'); - bool res = false; - /// Note: Tuples are serialized in CSV as separate columns, but with empty_as_default or null_as_default /// only one empty or NULL column will be expected if (format_settings.csv.empty_as_default && (at_delimiter || at_last_column_line_end)) @@ -321,34 +312,18 @@ bool CSVFormatReader::readField( /// they do not contain empty unquoted fields, so this check /// works for tuples as well. column.insertDefault(); + return false; } - else if (current_row_has_missing_fields) - { - column.insertDefault(); - } - else if (format_settings.null_as_default && !isNullableOrLowCardinalityNullable(type)) + + if (format_settings.null_as_default && !isNullableOrLowCardinalityNullable(type)) { /// If value is null but type is not nullable then use default value instead. - res = SerializationNullable::deserializeTextCSVImpl(column, *buf, format_settings, serialization); - } - else - { - /// Read the column normally. - serialization->deserializeTextCSV(column, *buf, format_settings); - res = true; + return SerializationNullable::deserializeTextCSVImpl(column, *buf, format_settings, serialization); } - if (is_last_file_column && format_settings.csv.ignore_extra_columns) - { - // Skip all fields to next line. - skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); - while (checkChar(format_settings.csv.delimiter, *buf)) - { - skipField(); - skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); - } - } - return res; + /// Read the column normally. 
+ serialization->deserializeTextCSV(column, *buf, format_settings); + return true; } void CSVFormatReader::skipPrefixBeforeHeader() @@ -377,6 +352,11 @@ bool CSVFormatReader::checkForSuffix() return false; } +bool CSVFormatReader::checkForEndOfRow() +{ + return buf->eof() || *buf->position() == '\n' || *buf->position() == '\r'; +} + CSVSchemaReader::CSVSchemaReader(ReadBuffer & in_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) : FormatWithNamesAndTypesSchemaReader( buf, diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h index 82e03c453e78..8ccf04feed37 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -69,6 +69,9 @@ class CSVFormatReader : public FormatWithNamesAndTypesReader void skipRowEndDelimiter() override; void skipPrefixBeforeHeader() override; + bool checkForEndOfRow() override; + bool allowVariableNumberOfColumns() override; + std::vector readNames() override { return readHeaderRow(); } std::vector readTypes() override { return readHeaderRow(); } std::vector readHeaderRow() { return readRowImpl(); } @@ -89,9 +92,6 @@ class CSVFormatReader : public FormatWithNamesAndTypesReader protected: PeekableReadBuffer * buf; - -private: - bool current_row_has_missing_fields = false; }; class CSVSchemaReader : public FormatWithNamesAndTypesSchemaReader diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp index eaedbbb4a1e5..fb49779e0af5 100644 --- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp +++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp @@ -227,7 +227,30 @@ bool RowInputFormatWithNamesAndTypes::readRow(MutableColumns & columns, RowReadE format_reader->skipField(file_column); if (!is_last_file_column) + { + if (format_reader->allowVariableNumberOfColumns() && format_reader->checkForEndOfRow()) + { + ++file_column; + while (file_column < column_mapping->column_indexes_for_input_fields.size()) + { + const auto & rem_column_index = column_mapping->column_indexes_for_input_fields[file_column]; + columns[*rem_column_index]->insertDefault(); + ++file_column; + } + } + else + format_reader->skipFieldDelimiter(); + } + } + + if (format_reader->allowVariableNumberOfColumns() && !format_reader->checkForEndOfRow()) + { + do + { format_reader->skipFieldDelimiter(); + format_reader->skipField(1); + } + while (!format_reader->checkForEndOfRow()); } format_reader->skipRowEndDelimiter(); diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h index 5648acd392dd..b5103d3db396 100644 --- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h +++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h @@ -119,6 +119,10 @@ class FormatWithNamesAndTypesReader /// Check suffix. 
virtual bool checkForSuffix() { return in->eof(); } + virtual bool checkForEndOfRow() { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method checkForEndOfRow is not implemented"); } + + virtual bool allowVariableNumberOfColumns() { return false; } + const FormatSettings & getFormatSettings() const { return format_settings; } virtual void setReadBuffer(ReadBuffer & in_) { in = &in_; } From 1336a9ec6770ae709f956a019c778346b8475162 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Thu, 6 Jul 2023 09:09:55 +0200 Subject: [PATCH 108/179] Better naming --- .../operations/system-tables/jemalloc_bins.md | 30 +++++++++---------- src/Storages/System/StorageSystemJemalloc.cpp | 4 +-- .../02810_system_jemalloc_bins.sql | 4 +-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/en/operations/system-tables/jemalloc_bins.md b/docs/en/operations/system-tables/jemalloc_bins.md index dfe2ddb01e2e..06d9ba57dfc3 100644 --- a/docs/en/operations/system-tables/jemalloc_bins.md +++ b/docs/en/operations/system-tables/jemalloc_bins.md @@ -11,8 +11,8 @@ Columns: - `index` (UInt64) — Index of the bin ordered by size - `large` (Bool) — True for large allocations and False for small - `size` (UInt64) — Size of allocations in this bin -- `nmalloc` (UInt64) — Number of allocations -- `ndalloc` (UInt64) — Number of deallocations +- `allocations` (UInt64) — Number of allocations +- `deallocations` (UInt64) — Number of deallocations **Example** @@ -21,7 +21,7 @@ Find the sizes of allocations that contributed the most to the current overall m ``` sql SELECT *, - nmalloc - ndalloc AS active_allocations, + allocations - deallocations AS active_allocations, size * active_allocations AS allocated_bytes FROM system.jemalloc_bins WHERE allocated_bytes > 0 @@ -30,16 +30,16 @@ LIMIT 10 ``` ``` text -┌─index─┬─large─┬─────size─┬──nmalloc─┬──ndalloc─┬─active_allocations─┬─allocated_bytes─┐ -│ 82 │ 1 │ 50331648 │ 1 │ 0 │ 1 │ 50331648 │ -│ 10 │ 0 │ 192 │ 512336 │ 370710 │ 141626 │ 27192192 │ -│ 69 │ 1 │ 5242880 │ 6 │ 2 │ 4 │ 20971520 │ -│ 3 │ 0 │ 48 │ 16938224 │ 16559484 │ 378740 │ 18179520 │ -│ 28 │ 0 │ 4096 │ 122924 │ 119142 │ 3782 │ 15491072 │ -│ 61 │ 1 │ 1310720 │ 44569 │ 44558 │ 11 │ 14417920 │ -│ 39 │ 1 │ 28672 │ 1285 │ 913 │ 372 │ 10665984 │ -│ 4 │ 0 │ 64 │ 2837225 │ 2680568 │ 156657 │ 10026048 │ -│ 6 │ 0 │ 96 │ 2617803 │ 2531435 │ 86368 │ 8291328 │ -│ 36 │ 1 │ 16384 │ 22431 │ 21970 │ 461 │ 7553024 │ -└───────┴───────┴──────────┴──────────┴──────────┴────────────────────┴─────────────────┘ +┌─index─┬─large─┬─────size─┬─allocactions─┬─deallocations─┬─active_allocations─┬─allocated_bytes─┐ +│ 82 │ 1 │ 50331648 │ 1 │ 0 │ 1 │ 50331648 │ +│ 10 │ 0 │ 192 │ 512336 │ 370710 │ 141626 │ 27192192 │ +│ 69 │ 1 │ 5242880 │ 6 │ 2 │ 4 │ 20971520 │ +│ 3 │ 0 │ 48 │ 16938224 │ 16559484 │ 378740 │ 18179520 │ +│ 28 │ 0 │ 4096 │ 122924 │ 119142 │ 3782 │ 15491072 │ +│ 61 │ 1 │ 1310720 │ 44569 │ 44558 │ 11 │ 14417920 │ +│ 39 │ 1 │ 28672 │ 1285 │ 913 │ 372 │ 10665984 │ +│ 4 │ 0 │ 64 │ 2837225 │ 2680568 │ 156657 │ 10026048 │ +│ 6 │ 0 │ 96 │ 2617803 │ 2531435 │ 86368 │ 8291328 │ +│ 36 │ 1 │ 16384 │ 22431 │ 21970 │ 461 │ 7553024 │ +└───────┴───────┴──────────┴──────────────┴───────────────┴────────────────────┴─────────────────┘ ``` diff --git a/src/Storages/System/StorageSystemJemalloc.cpp b/src/Storages/System/StorageSystemJemalloc.cpp index 2cb666eb5c38..4348349ebbc6 100644 --- a/src/Storages/System/StorageSystemJemalloc.cpp +++ b/src/Storages/System/StorageSystemJemalloc.cpp @@ -95,8 
+95,8 @@ NamesAndTypesList StorageSystemJemallocBins::getNamesAndTypes() { "index", std::make_shared() }, { "large", std::make_shared() }, { "size", std::make_shared() }, - { "nmalloc", std::make_shared() }, - { "ndalloc", std::make_shared() }, + { "allocations", std::make_shared() }, + { "deallocations", std::make_shared() }, }; } diff --git a/tests/queries/0_stateless/02810_system_jemalloc_bins.sql b/tests/queries/0_stateless/02810_system_jemalloc_bins.sql index 8ecf47e51b5a..03062e70aa3a 100644 --- a/tests/queries/0_stateless/02810_system_jemalloc_bins.sql +++ b/tests/queries/0_stateless/02810_system_jemalloc_bins.sql @@ -3,8 +3,8 @@ WITH (SELECT count() FROM system.jemalloc_bins) AS total_bins, (SELECT count() FROM system.jemalloc_bins WHERE large) AS large_bins, (SELECT count() FROM system.jemalloc_bins WHERE NOT large) AS small_bins, - (SELECT sum(size * (nmalloc - ndalloc)) FROM system.jemalloc_bins WHERE large) AS large_allocated_bytes, - (SELECT sum(size * (nmalloc - ndalloc)) FROM system.jemalloc_bins WHERE NOT large) AS small_allocated_bytes + (SELECT sum(size * (allocations - deallocations)) FROM system.jemalloc_bins WHERE large) AS large_allocated_bytes, + (SELECT sum(size * (allocations - deallocations)) FROM system.jemalloc_bins WHERE NOT large) AS small_allocated_bytes SELECT (total_bins > 0) = jemalloc_enabled, (large_bins > 0) = jemalloc_enabled, From 32f5a7830229b53df80f9e788b860066a4a86947 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 6 Jul 2023 07:32:46 +0000 Subject: [PATCH 109/179] Fix setting name --- docs/en/interfaces/formats.md | 2 +- docs/en/operations/settings/settings-formats.md | 2 +- docs/ru/interfaces/formats.md | 2 +- docs/ru/operations/settings/settings.md | 2 +- src/Core/Settings.h | 2 +- src/Formats/FormatFactory.cpp | 2 +- src/Formats/FormatSettings.h | 2 +- src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 2 +- tests/queries/0_stateless/00301_csv.reference | 4 ++-- tests/queries/0_stateless/00301_csv.sh | 8 ++++---- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 34f9abb91d43..ed2f010a6325 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -471,7 +471,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`. - [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`. - [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`. -- [input_format_csv_allow_variable_number_of_colums](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_colums) - ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Default value - `false`. 
+- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Default value - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 43e410ceee8d..3eea5ef4ad99 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -931,7 +931,7 @@ Result ```text " string " ``` -### input_format_csv_allow_variable_number_of_colums {#input_format_csv_allow_variable_number_of_colums} +### input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns} ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index e7c57fff7495..e232b63f0496 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -402,7 +402,7 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR - [input_format_csv_skip_first_lines](../operations/settings/settings.md#input_format_csv_skip_first_lines) - пропустить указанное количество строк в начале данных. Значение по умолчанию - `0`. - [input_format_csv_detect_header](../operations/settings/settings.md#input_format_csv_detect_header) - обнаружить заголовок с именами и типами в формате CSV. Значение по умолчанию - `true`. - [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - удалить пробелы и символы табуляции из строк без кавычек. Значение по умолчанию - `true`. -- [input_format_csv_allow_variable_number_of_colums](../operations/settings/settings.md/#input_format_csv_allow_variable_number_of_colums) - игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается) и рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. Значение по умолчанию - `false`. +- [input_format_csv_allow_variable_number_of_columns](../operations/settings/settings.md/#input_format_csv_allow_variable_number_of_columns) - игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается) и рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. Значение по умолчанию - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index ddc101c69913..42e21f6140b8 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1727,7 +1727,7 @@ echo ' string ' | ./clickhouse local -q "select * from table FORMAT CSV" --in " string " ``` -## input_format_csv_allow_variable_number_of_colums {#input_format_csv_allow_variable_number_of_colums} +## input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns} Игнорировать дополнительные столбцы (если файл содержит больше столбцов чем ожидается) и рассматривать отсутствующие поля в CSV в качестве значений по умолчанию. 
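As a companion to the earlier sketch, the same behaviour under the corrected setting name, this time exercising only the missing-trailing-fields side (same assumption about the explicit-structure form of the `format` table function):

```sql
-- Sketch: the second and third rows omit trailing columns; with the setting enabled they are
-- filled with the column types' defaults ('' for String, 0 for UInt64, NULL for Nullable).
SELECT *
FROM format(CSV, 'f1 String, f2 UInt64, f3 Nullable(UInt64), f4 String',
$$"Hello",1,2,"full row"
"Hello",2,3
"Hello",3
$$)
SETTINGS input_format_csv_allow_variable_number_of_columns = 1;
```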
diff --git a/src/Core/Settings.h b/src/Core/Settings.h index df2a916b7cf0..7f8a52c69fa2 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1009,7 +1009,7 @@ class IColumn; M(Bool, regexp_dict_allow_hyperscan, true, "Allow regexp_tree dictionary using Hyperscan library.", 0) \ \ M(Bool, dictionary_use_async_executor, false, "Execute a pipeline for reading from a dictionary with several threads. It's supported only by DIRECT dictionary with CLICKHOUSE source.", 0) \ - M(Bool, input_format_csv_allow_variable_number_of_colums, false, "Ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values", 0) \ + M(Bool, input_format_csv_allow_variable_number_of_columns, false, "Ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values", 0) \ // End of FORMAT_FACTORY_SETTINGS // Please add settings non-related to formats into the COMMON_SETTINGS above. diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index af9823dde73f..182abc84ffe4 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -72,7 +72,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.skip_trailing_empty_lines = settings.input_format_csv_skip_trailing_empty_lines; format_settings.csv.trim_whitespaces = settings.input_format_csv_trim_whitespaces; format_settings.csv.allow_whitespace_or_tab_as_delimiter = settings.input_format_csv_allow_whitespace_or_tab_as_delimiter; - format_settings.csv.allow_variable_number_of_colums = settings.input_format_csv_allow_variable_number_of_colums; + format_settings.csv.allow_variable_number_of_columns = settings.input_format_csv_allow_variable_number_of_columns; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 653578f84962..dd4608227d03 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -140,7 +140,7 @@ struct FormatSettings bool skip_trailing_empty_lines = false; bool trim_whitespaces = true; bool allow_whitespace_or_tab_as_delimiter = false; - bool allow_variable_number_of_colums = false; + bool allow_variable_number_of_columns = false; } csv; struct HiveText diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 57e05ae7cd3d..60f1cbe1f801 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -285,7 +285,7 @@ bool CSVFormatReader::parseRowEndWithDiagnosticInfo(WriteBuffer & out) bool CSVFormatReader::allowVariableNumberOfColumns() { - return format_settings.csv.allow_variable_number_of_colums; + return format_settings.csv.allow_variable_number_of_columns; } bool CSVFormatReader::readField( diff --git a/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference index 804ccf0c713a..ec8c5f2b3713 100644 --- a/tests/queries/0_stateless/00301_csv.reference +++ b/tests/queries/0_stateless/00301_csv.reference @@ -14,14 +14,14 @@ default-eof 1 2019-06-19 2016-01-01 01:02:03 NUL 2016-01-02 01:02:03 Nhello \N \N -=== Test 
input_format_csv_ignore_extra_columns +=== Test ignore extra columns Hello 1 String1 Hello 2 String2 Hello 3 String3 Hello 4 String4 Hello 5 String5 Hello 6 String6 -=== Test input_format_csv_missing_as_default +=== Test missing as default 0 0 33 \N 55 Default 0 0 33 \N 55 Default Hello 0 0 33 \N 55 Default diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index 7657745e9f75..776bd39fc03d 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -41,7 +41,7 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s NULLS LAST"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; -echo === Test input_format_csv_ignore_extra_columns +echo === Test ignore extra columns $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64 DEFAULT 3, d String DEFAULT 'String4') ENGINE = Memory"; echo '"Hello", 1, "String1" @@ -50,12 +50,12 @@ echo '"Hello", 1, "String1" "Hello", 4, , "2016-01-14" "Hello", 5, "String5", "2016-01-15", "2016-01-16" "Hello", 6, "String6" , "line with a -break"' | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_ignore_extra_columns=1 --query="INSERT INTO csv FORMAT CSV"; +break"' | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_empty_as_default=1 --input_format_csv_allow_variable_number_of_columns=1 --query="INSERT INTO csv FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY s, n"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; -echo === Test input_format_csv_missing_as_default +echo === Test missing as default $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (f1 String, f2 UInt64, f3 UInt256, f4 UInt64 Default 33, f5 Nullable(UInt64), f6 Nullable(UInt64) Default 55, f7 String DEFAULT 'Default') ENGINE = Memory"; echo ' @@ -65,6 +65,6 @@ echo ' "Hello", 1, 3, 2 "Hello",1,4,2,3,4,"String" "Hello", 1, 4, 2, 3, 4, "String" -"Hello", 1, 5, 2, 3, 4, "String",'| $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_missing_as_default=1 --query="INSERT INTO csv FORMAT CSV"; +"Hello", 1, 5, 2, 3, 4, "String",'| $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=1 --input_format_csv_allow_variable_number_of_columns=1 --query="INSERT INTO csv FORMAT CSV"; $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY f1, f2, f3, f4, f5 NULLS FIRST, f6, f7"; $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; From 24e77083b38fbfdbec0d5a6fa8da65cb6a33a602 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Thu, 6 Jul 2023 09:50:44 +0000 Subject: [PATCH 110/179] Commit tests --- src/Parsers/ParserCreateQuery.cpp | 3 +- .../02811_primary_key_in_columns.reference | 0 .../02811_primary_key_in_columns.sql | 50 +++++++++---------- 3 files changed, 27 insertions(+), 26 deletions(-) create mode 100644 tests/queries/0_stateless/02811_primary_key_in_columns.reference diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 1941bafab0de..60e15cb92f4a 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -311,7 +311,7 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E if(!primary_key_from_columns) primary_key_from_columns = makeASTFunction("tuple"); auto column_identifier = std::make_shared(cd->name); - primary_key_from_columns->children.push_back(column_identifier); + 
primary_key_from_columns->children[0]->as()->children.push_back(column_identifier); } columns->children.push_back(elem); } @@ -710,6 +710,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple primary keys are not allowed."); query->storage->primary_key = query->columns_list->primary_key; + } if (query->columns_list && (query->columns_list->primary_key_from_columns)) diff --git a/tests/queries/0_stateless/02811_primary_key_in_columns.reference b/tests/queries/0_stateless/02811_primary_key_in_columns.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02811_primary_key_in_columns.sql b/tests/queries/0_stateless/02811_primary_key_in_columns.sql index df25fdd14abc..0519f4c820ba 100644 --- a/tests/queries/0_stateless/02811_primary_key_in_columns.sql +++ b/tests/queries/0_stateless/02811_primary_key_in_columns.sql @@ -23,39 +23,39 @@ DROP TABLE IF EXISTS pk_test21; DROP TABLE IF EXISTS pk_test22; DROP TABLE IF EXISTS pk_test23; -SET default_table_engine=MergeTree; +SET default_table_engine='MergeTree'; -CREATE TABLE pk_test1 (String a PRIMARY KEY, String b, String c); -CREATE TABLE pk_test2 (String a PRIMARY KEY, String b PRIMARY KEY, String c); -CREATE TABLE pk_test3 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY); +CREATE TABLE pk_test1 (a String PRIMARY KEY, b String, c String); +CREATE TABLE pk_test2 (a String PRIMARY KEY, b String PRIMARY KEY, c String); +CREATE TABLE pk_test3 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY); -CREATE TABLE pk_test4 (String a, String b PRIMARY KEY, String c PRIMARY KEY); -CREATE TABLE pk_test5 (String a, String b PRIMARY KEY, String c); -CREATE TABLE pk_test6 (String a, String b, String c PRIMARY KEY); +CREATE TABLE pk_test4 (a String, b String PRIMARY KEY, c String PRIMARY KEY); +CREATE TABLE pk_test5 (a String, b String PRIMARY KEY, c String); +CREATE TABLE pk_test6 (a String, b String, c String PRIMARY KEY); -CREATE TABLE pk_test7 (String a PRIMARY KEY, String b, String c, PRIMARY KEY (a)); -CREATE TABLE pk_test8 (String a PRIMARY KEY, String b PRIMARY KEY, String c, PRIMARY KEY (a)); -CREATE TABLE pk_test9 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY, PRIMARY KEY (a)); +CREATE TABLE pk_test7 (a String PRIMARY KEY, b String, c String, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test8 (a String PRIMARY KEY, b String PRIMARY KEY, c String, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test9 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } -CREATE TABLE pk_test10 (String a, String b PRIMARY KEY, String c PRIMARY KEY, PRIMARY KEY (a)); -CREATE TABLE pk_test11 (String a, String b PRIMARY KEY, String c, PRIMARY KEY (a)); -CREATE TABLE pk_test12 (String a, String b, String c PRIMARY KEY, PRIMARY KEY (a)); +CREATE TABLE pk_test10 (a String, b String PRIMARY KEY, c String PRIMARY KEY, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test11 (a String, b String PRIMARY KEY, c String, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test12 (a String, b String, c String PRIMARY KEY, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } -CREATE TABLE pk_test12 (String a PRIMARY KEY, String b, String c) PRIMARY KEY (a,b,c); -CREATE TABLE pk_test13 (String a PRIMARY KEY, String b PRIMARY KEY, String c) PRIMARY KEY (a,b,c); -CREATE 
TABLE pk_test14 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY) PRIMARY KEY (a,b,c); +CREATE TABLE pk_test12 (a String PRIMARY KEY, b String, c String) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test13 (a String PRIMARY KEY, b String PRIMARY KEY, c String) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test14 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } -CREATE TABLE pk_test15 (String a, String b PRIMARY KEY, String c PRIMARY KEY) PRIMARY KEY (a,b,c); -CREATE TABLE pk_test16 (String a, String b PRIMARY KEY, String c) PRIMARY KEY (a,b,c); -CREATE TABLE pk_test17 (String a, String b, String c PRIMARY KEY) PRIMARY KEY (a,b,c); +CREATE TABLE pk_test15 (a String, b String PRIMARY KEY, c String PRIMARY KEY) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test16 (a String, b String PRIMARY KEY, c String) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test17 (a String, b String, c String PRIMARY KEY) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } -CREATE TABLE pk_test18 (String a PRIMARY KEY, String b, String c) ORDER BY (a,b,c); -CREATE TABLE pk_test19 (String a PRIMARY KEY, String b PRIMARY KEY, String c) ORDER BY (a,b,c); -CREATE TABLE pk_test20 (String a PRIMARY KEY, String b PRIMARY KEY, String c PRIMARY KEY) ORDER BY (a,b,c); +CREATE TABLE pk_test18 (a String PRIMARY KEY, b String, c String) ORDER BY (a,b,c); +CREATE TABLE pk_test19 (a String PRIMARY KEY, b String PRIMARY KEY, c String) ORDER BY (a,b,c); +CREATE TABLE pk_test20 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY) ORDER BY (a,b,c); -CREATE TABLE pk_test21 (String a, String b PRIMARY KEY, String c PRIMARY KEY) ORDER BY (a,b,c); -CREATE TABLE pk_test22 (String a, String b PRIMARY KEY, String c) ORDER BY (a,b,c); -CREATE TABLE pk_test23 (String a, String b, String c PRIMARY KEY) ORDER BY (a,b,c); +CREATE TABLE pk_test21 (a String, b String PRIMARY KEY, c String PRIMARY KEY) ORDER BY (a,b,c); -- { serverError BAD_ARGUMENTS } +CREATE TABLE pk_test22 (a String, b String PRIMARY KEY, c String) ORDER BY (a,b,c); -- { serverError BAD_ARGUMENTS } +CREATE TABLE pk_test23 (a String, b String, c String PRIMARY KEY) ORDER BY (a,b,c); -- { serverError BAD_ARGUMENTS } DROP TABLE IF EXISTS pk_test1; DROP TABLE IF EXISTS pk_test2; From e80f2a0acc91d9003880f4054f05b3e23b9a2679 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Thu, 6 Jul 2023 09:55:30 +0000 Subject: [PATCH 111/179] Define default_table_engine in sqllogictest --- tests/sqllogic/connection.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/sqllogic/connection.py b/tests/sqllogic/connection.py index a9976a7beca7..5e2634787d84 100644 --- a/tests/sqllogic/connection.py +++ b/tests/sqllogic/connection.py @@ -62,6 +62,7 @@ def default_clickhouse_odbc_conn_str(): return str( OdbcConnectingArgs.create_from_kw( dsn="ClickHouse DSN (ANSI)", + Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree" ) ) From 12ebb30781e8427a1e797464c3bd4675787c87e9 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Thu, 6 Jul 2023 10:34:34 +0000 Subject: [PATCH 112/179] style --- src/Parsers/ParserCreateQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 
60e15cb92f4a..c4c02ab74177 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -306,9 +306,9 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E { if (auto *cd = elem->as()) { - if(cd->primary_key_specifier) + if (cd->primary_key_specifier) { - if(!primary_key_from_columns) + if (!primary_key_from_columns) primary_key_from_columns = makeASTFunction("tuple"); auto column_identifier = std::make_shared(cd->name); primary_key_from_columns->children[0]->as()->children.push_back(column_identifier); From c7ccf23a24a7fb2bb1245b76fc9169649cd474c3 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Thu, 6 Jul 2023 10:44:06 +0000 Subject: [PATCH 113/179] Update CREATE TABLE docs --- .../mergetree-family/mergetree.md | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 67043ef10625..4f5061266827 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -37,8 +37,8 @@ The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [TTL expr1] [CODEC(codec1)] [[NOT] NULL|PRIMARY KEY], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [TTL expr2] [CODEC(codec2)] [[NOT] NULL|PRIMARY KEY], ... INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1], INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2], @@ -439,41 +439,41 @@ Syntax: `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, - `number_of_hash_functions` — The number of hash functions used in the Bloom filter. - `random_seed` — The seed for Bloom filter hash functions. -Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to estimate the parameters set of `ngrambf_v1`. Query statements are as follows: +Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to estimate the parameters set of `ngrambf_v1`. 
Query statements are as follows: ```sql -CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster] -AS -(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2)); - -CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster] -AS -(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2)))); - -CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster] -AS -(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions); - -CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster] -AS +CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster] +AS +(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2)); + +CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster] +AS +(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2)))); + +CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster] +AS +(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions); + +CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster] +AS (number_of_hash_functions, probability_of_false_positives, size_of_bloom_filter_in_bytes) -> ceil(size_of_bloom_filter_in_bytes / (-number_of_hash_functions / log(1 - exp(log(probability_of_false_positives) / number_of_hash_functions)))) -``` +``` To use those functions,we need to specify two parameter at least. -For example, if there 4300 ngrams in the granule and we expect false positives to be less than 0.0001. The other parameters can be estimated by executing following queries: - +For example, if there 4300 ngrams in the granule and we expect false positives to be less than 0.0001. The other parameters can be estimated by executing following queries: + ```sql --- estimate number of bits in the filter -SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes; +SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes; ┌─size_of_bloom_filter_in_bytes─┐ │ 10304 │ └───────────────────────────────┘ - + --- estimate number of hash functions SELECT bfEstimateFunctions(4300, bfEstimateBmSize(4300, 0.0001)) as number_of_hash_functions - + ┌─number_of_hash_functions─┐ │ 13 │ └──────────────────────────┘ @@ -991,7 +991,7 @@ use a local disk to cache data from a table stored at a URL. Neither the cache d nor the web storage is configured in the ClickHouse configuration files; both are configured in the CREATE/ATTACH query settings. -In the settings highlighted below notice that the disk of `type=web` is nested within +In the settings highlighted below notice that the disk of `type=web` is nested within the disk of `type=cache`. ```sql @@ -1308,7 +1308,7 @@ configuration file. 
In this sample configuration: - the disk is of type `web` - the data is hosted at `http://nginx:80/test1/` -- a cache on local storage is used +- a cache on local storage is used ```xml From 86fc70223693db8aac9edfa7c85e7e80286042ec Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 6 Jul 2023 15:14:18 +0300 Subject: [PATCH 114/179] Add skipWhitespacesAndTabs() Co-authored-by: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> --- src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 60f1cbe1f801..79ce2549b4d8 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -354,6 +354,7 @@ bool CSVFormatReader::checkForSuffix() bool CSVFormatReader::checkForEndOfRow() { + skipWhitespacesAndTabs(*buf, format_settings.csv.allow_whitespace_or_tab_as_delimiter); return buf->eof() || *buf->position() == '\n' || *buf->position() == '\r'; } From c23e29d6aa836980337683800c6c2b029cfb7c40 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 20 Jun 2023 20:27:56 +0200 Subject: [PATCH 115/179] don't account session's memory in thread/user mem tracker --- src/Common/MemoryTrackerSwitcher.h | 42 ++++++++++++++++++++ src/IO/HTTPCommon.cpp | 4 ++ src/Interpreters/AsynchronousInsertQueue.cpp | 2 +- src/Interpreters/AsynchronousInsertQueue.h | 33 +++------------ src/Server/InterserverIOHTTPHandler.cpp | 1 + 5 files changed, 53 insertions(+), 29 deletions(-) create mode 100644 src/Common/MemoryTrackerSwitcher.h diff --git a/src/Common/MemoryTrackerSwitcher.h b/src/Common/MemoryTrackerSwitcher.h new file mode 100644 index 000000000000..0fefcbb280a7 --- /dev/null +++ b/src/Common/MemoryTrackerSwitcher.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +struct MemoryTrackerSwitcher +{ + explicit MemoryTrackerSwitcher(MemoryTracker * new_tracker) + { + if (!current_thread) + throw Exception(ErrorCodes::LOGICAL_ERROR, "current_thread is not initialized"); + + auto * thread_tracker = CurrentThread::getMemoryTracker(); + prev_untracked_memory = current_thread->untracked_memory; + prev_memory_tracker_parent = thread_tracker->getParent(); + + current_thread->untracked_memory = 0; + thread_tracker->setParent(new_tracker); + } + + ~MemoryTrackerSwitcher() + { + CurrentThread::flushUntrackedMemory(); + auto * thread_tracker = CurrentThread::getMemoryTracker(); + + current_thread->untracked_memory = prev_untracked_memory; + thread_tracker->setParent(prev_memory_tracker_parent); + } + + MemoryTracker * prev_memory_tracker_parent = nullptr; + Int64 prev_untracked_memory = 0; +}; + +} diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index f3e2064c8bfa..1731b4022ea0 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -107,6 +108,9 @@ namespace ObjectPtr allocObject() override { + /// Pool is global, we shouldn't attribute this memory to query/user. 
+ MemoryTrackerSwitcher switcher{&total_memory_tracker}; + auto session = makeHTTPSessionImpl(host, port, https, true, resolve_host); if (!proxy_host.empty()) { diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index dc2310cfebf9..e6417de53b4f 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -125,7 +125,7 @@ void AsynchronousInsertQueue::InsertData::Entry::finish(std::exception_ptr excep // Entries data must be destroyed in context of user who runs async insert. // Each entry in the list may correspond to a different user, // so we need to switch current thread's MemoryTracker. - UserMemoryTrackerSwitcher switcher(user_memory_tracker); + MemoryTrackerSwitcher switcher(user_memory_tracker); bytes = ""; } diff --git a/src/Interpreters/AsynchronousInsertQueue.h b/src/Interpreters/AsynchronousInsertQueue.h index bc60c86d067c..f18db69a7bb7 100644 --- a/src/Interpreters/AsynchronousInsertQueue.h +++ b/src/Interpreters/AsynchronousInsertQueue.h @@ -1,10 +1,12 @@ #pragma once +#include #include +#include #include +#include #include -#include -#include + #include namespace DB @@ -60,31 +62,6 @@ class AsynchronousInsertQueue : public WithContext UInt128 calculateHash() const; }; - struct UserMemoryTrackerSwitcher - { - explicit UserMemoryTrackerSwitcher(MemoryTracker * new_tracker) - { - auto * thread_tracker = CurrentThread::getMemoryTracker(); - prev_untracked_memory = current_thread->untracked_memory; - prev_memory_tracker_parent = thread_tracker->getParent(); - - current_thread->untracked_memory = 0; - thread_tracker->setParent(new_tracker); - } - - ~UserMemoryTrackerSwitcher() - { - CurrentThread::flushUntrackedMemory(); - auto * thread_tracker = CurrentThread::getMemoryTracker(); - - current_thread->untracked_memory = prev_untracked_memory; - thread_tracker->setParent(prev_memory_tracker_parent); - } - - MemoryTracker * prev_memory_tracker_parent; - Int64 prev_untracked_memory; - }; - struct InsertData { struct Entry @@ -114,7 +91,7 @@ class AsynchronousInsertQueue : public WithContext // so we need to switch current thread's MemoryTracker parent on each iteration. while (it != entries.end()) { - UserMemoryTrackerSwitcher switcher((*it)->user_memory_tracker); + MemoryTrackerSwitcher switcher((*it)->user_memory_tracker); it = entries.erase(it); } } diff --git a/src/Server/InterserverIOHTTPHandler.cpp b/src/Server/InterserverIOHTTPHandler.cpp index ea71d954cc05..9741592868a6 100644 --- a/src/Server/InterserverIOHTTPHandler.cpp +++ b/src/Server/InterserverIOHTTPHandler.cpp @@ -80,6 +80,7 @@ void InterserverIOHTTPHandler::processQuery(HTTPServerRequest & request, HTTPSer void InterserverIOHTTPHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) { setThreadName("IntersrvHandler"); + ThreadStatus thread_status; /// In order to work keep-alive. 
if (request.getVersion() == HTTPServerRequest::HTTP_1_1) From aec720563612e3d7faa09bcb2c4b2cc4e5e8935c Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 3 Jul 2023 23:11:32 +0200 Subject: [PATCH 116/179] rework pool usage --- src/IO/HTTPCommon.cpp | 44 ++++++++----- src/IO/HTTPCommon.h | 12 ++++ src/IO/ReadBufferFromS3.cpp | 29 ++++++--- src/IO/ReadWriteBufferFromHTTP.cpp | 65 ++++++++----------- .../Formats/Impl/AvroRowInputFormat.cpp | 22 +++---- 5 files changed, 95 insertions(+), 77 deletions(-) diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 1731b4022ea0..2f5e0a172a09 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -41,6 +42,7 @@ namespace ErrorCodes extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS; extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME; extern const int UNSUPPORTED_URI_SCHEME; + extern const int LOGICAL_ERROR; } @@ -271,27 +273,17 @@ namespace auto retry_timeout = timeouts.connection_timeout.totalMicroseconds(); auto session = pool_ptr->second->get(retry_timeout); - /// We store exception messages in session data. - /// Poco HTTPSession also stores exception, but it can be removed at any time. const auto & session_data = session->sessionData(); - if (!session_data.empty()) + if (session_data.empty() || !Poco::AnyCast(&session_data)) { - auto msg = Poco::AnyCast(session_data); - if (!msg.empty()) - { - LOG_TRACE((&Poco::Logger::get("HTTPCommon")), "Failed communicating with {} with error '{}' will try to reconnect session", host, msg); + session->reset(); - if (resolve_host) - { - updateHostIfIpChanged(session, DNSResolver::instance().resolveHost(host).toString()); - } - } - /// Reset the message, once it has been printed, - /// otherwise you will get report for failed parts on and on, - /// even for different tables (since they uses the same session). - session->attachSessionData({}); + if (resolve_host) + updateHostIfIpChanged(session, DNSResolver::instance().resolveHost(host).toString()); } + session->attachSessionData({}); + setTimeouts(*session, timeouts); return session; @@ -388,4 +380,24 @@ Exception HTTPException::makeExceptionMessage( uri, static_cast(http_status), reason, body); } +void markSessionForReuse(Poco::Net::HTTPSession & session) +{ + const auto & session_data = session.sessionData(); + if (!session_data.empty() && !Poco::AnyCast(&session_data)) + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Data of an unexpected type ({}) is attached to the session", session_data.type().name()); + + session.attachSessionData(HTTPSessionReuseTag{}); +} + +void markSessionForReuse(HTTPSessionPtr session) +{ + markSessionForReuse(*session); +} + +void markSessionForReuse(PooledHTTPSessionPtr session) +{ + markSessionForReuse(static_cast(*session)); +} + } diff --git a/src/IO/HTTPCommon.h b/src/IO/HTTPCommon.h index db8fc2a2a40b..4733f366c8a8 100644 --- a/src/IO/HTTPCommon.h +++ b/src/IO/HTTPCommon.h @@ -55,6 +55,18 @@ class HTTPException : public Exception using PooledHTTPSessionPtr = PoolBase::Entry; // SingleEndpointHTTPSessionPool::Entry using HTTPSessionPtr = std::shared_ptr; +/// If a session have this tag attached, it will be reused without calling `reset()` on it. +/// All pooled sessions don't have this tag attached after being taken from a pool. +/// If the request and the response were fully written/read, the client code should add this tag +/// explicitly by calling `markSessionForReuse()`. 
+struct HTTPSessionReuseTag +{ +}; + +void markSessionForReuse(HTTPSessionPtr session); +void markSessionForReuse(PooledHTTPSessionPtr session); + + void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout); /// Create session object to perform requests and set required parameters. diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index fdbe1a4ba57c..5c562d32fbc6 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -1,3 +1,4 @@ +#include #include #include "config.h" @@ -35,31 +36,41 @@ namespace ProfileEvents namespace { -void resetSession(Aws::S3::Model::GetObjectResult & read_result) +DB::PooledHTTPSessionPtr getSession(Aws::S3::Model::GetObjectResult & read_result) { if (auto * session_aware_stream = dynamic_cast *>(&read_result.GetBody())) - { - auto & session - = static_cast(*static_cast(session_aware_stream->getSession())); - session.reset(); - } + return static_cast(session_aware_stream->getSession()); else if (!dynamic_cast *>(&read_result.GetBody())) - { throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Session of unexpected type encountered"); + return {}; +} + +void resetSession(Aws::S3::Model::GetObjectResult & read_result) +{ + if (auto session = getSession(read_result); !session.isNull()) + { + auto & http_session = static_cast(*session); + http_session.reset(); } } void resetSessionIfNeeded(bool read_all_range_successfully, std::optional & read_result) { - if (!read_all_range_successfully && read_result) + if (!read_result) + return; + + if (!read_all_range_successfully) { /// When we abandon a session with an ongoing GetObject request and there is another one trying to delete the same object this delete /// operation will hang until GetObject's session idle timeouts. So we have to call `reset()` on GetObject's session session immediately. 
resetSession(*read_result); ProfileEvents::increment(ProfileEvents::ReadBufferFromS3ResetSessions); } - else + else if (auto session = getSession(*read_result); !session.isNull()) + { + DB::markSessionForReuse(session); ProfileEvents::increment(ProfileEvents::ReadBufferFromS3PreservedSessions); + } } } diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index cf1159bfb4b1..b834c17ab6cf 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -1,5 +1,7 @@ #include "ReadWriteBufferFromHTTP.h" +#include + namespace ProfileEvents { extern const Event ReadBufferSeekCancelConnection; @@ -146,30 +148,20 @@ std::istream * ReadWriteBufferFromHTTPBase::callImpl( LOG_TRACE(log, "Sending request to {}", uri_.toString()); auto sess = current_session->getSession(); - try - { - auto & stream_out = sess->sendRequest(request); + auto & stream_out = sess->sendRequest(request); - if (out_stream_callback) - out_stream_callback(stream_out); + if (out_stream_callback) + out_stream_callback(stream_out); - auto result_istr = receiveResponse(*sess, request, response, true); - response.getCookies(cookies); + auto result_istr = receiveResponse(*sess, request, response, true); + response.getCookies(cookies); - /// we can fetch object info while the request is being processed - /// and we don't want to override any context used by it - if (!for_object_info) - content_encoding = response.get("Content-Encoding", ""); + /// we can fetch object info while the request is being processed + /// and we don't want to override any context used by it + if (!for_object_info) + content_encoding = response.get("Content-Encoding", ""); - return result_istr; - } - catch (const Poco::Exception & e) - { - /// We use session data storage as storage for exception text - /// Depend on it we can deduce to reconnect session or reresolve session host - sess->attachSessionData(e.message()); - throw; - } + return result_istr; } template @@ -429,23 +421,10 @@ void ReadWriteBufferFromHTTPBase::initialize() if (!read_range.end && response.hasContentLength()) file_info = parseFileInfo(response, withPartialContent(read_range) ? getOffset() : 0); - try - { - impl = std::make_unique(*istr, buffer_size); + impl = std::make_unique(*istr, buffer_size); - if (use_external_buffer) - { - setupExternalBuffer(); - } - } - catch (const Poco::Exception & e) - { - /// We use session data storage as storage for exception text - /// Depend on it we can deduce to reconnect session or reresolve session host - auto sess = session->getSession(); - sess->attachSessionData(e.message()); - throw; - } + if (use_external_buffer) + setupExternalBuffer(); } template @@ -460,7 +439,11 @@ bool ReadWriteBufferFromHTTPBase::nextImpl() if ((read_range.end && getOffset() > read_range.end.value()) || (file_info && file_info->file_size && getOffset() >= file_info->file_size.value())) + { + /// Response was fully read. + markSessionForReuse(session->getSession()); return false; + } if (impl) { @@ -582,7 +565,11 @@ bool ReadWriteBufferFromHTTPBase::nextImpl() std::rethrow_exception(exception); if (!result) + { + /// Eof is reached, i.e response was fully read. 
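        /// Editorial aside, not part of the patch: taken together, the changes above define the intended
        /// pattern for HTTPSessionReuseTag — a pooled session gets reset() when taken from the pool unless
        /// the previous user tagged it, and it should only be tagged once the response has been fully read.
        /// A rough client-side sketch, assuming a prepared Poco request and a hypothetical readWholeBody helper:
        ///
        ///     auto session = makePooledHTTPSession(uri, timeouts, /* per-endpoint pool size */ 1);
        ///     session->sendRequest(request);
        ///     Poco::Net::HTTPResponse response;
        ///     std::istream * body = receiveResponse(*session, request, response, /* allow_redirects */ false);
        ///     readWholeBody(*body);             /// hypothetical: drain the stream to the end
        ///     markSessionForReuse(session);     /// without this tag the pool would reset() the session on next use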
+ markSessionForReuse(session->getSession()); return false; + } internal_buffer = impl->buffer(); working_buffer = internal_buffer; @@ -635,12 +622,14 @@ size_t ReadWriteBufferFromHTTPBase::readBigAt(char * to, si bool cancelled; size_t r = copyFromIStreamWithProgressCallback(*result_istr, to, n, progress_callback, &cancelled); + if (!cancelled) + /// Response was fully read. + markSessionForReuse(sess); + return r; } catch (const Poco::Exception & e) { - sess->attachSessionData(e.message()); - LOG_ERROR( log, "HTTP request (positioned) to `{}` with range [{}, {}) failed at try {}/{}: {}", diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index 1ec7491658e7..4cd73cb23b59 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -935,23 +935,17 @@ class AvroConfluentRowInputFormat::SchemaRegistry request.setHost(url.getHost()); auto session = makePooledHTTPSession(url, timeouts, 1); - std::istream * response_body{}; - try - { - session->sendRequest(request); + session->sendRequest(request); + + Poco::Net::HTTPResponse response; + std::istream * response_body = receiveResponse(*session, request, response, false); - Poco::Net::HTTPResponse response; - response_body = receiveResponse(*session, request, response, false); - } - catch (const Poco::Exception & e) - { - /// We use session data storage as storage for exception text - /// Depend on it we can deduce to reconnect session or reresolve session host - session->attachSessionData(e.message()); - throw; - } Poco::JSON::Parser parser; auto json_body = parser.parse(*response_body).extract(); + + /// Response was fully read. + markSessionForReuse(session); + auto schema = json_body->getValue("schema"); LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Successfully fetched schema id = {}\n{}", id, schema); return avro::compileJsonSchemaFromString(schema); From 5a6957d95e46861f39bdb1c39e442951b1e26d47 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 6 Jul 2023 13:02:03 +0000 Subject: [PATCH 117/179] Disable ThinLTO on non-Linux MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cross-compiling on Linux for Mac failed with CMake parameters -DCMAKE_BUILD_TYPE=None -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 (see below). This happened e.g. in #51243. The problem was that ThinLTO enabled/disabled depends on ENABLE_TESTS (see the top-level CMakeLists.txt). If ENABLE_TESTS=0 then ThinLTO is activated. On Linux, building/linking works with or without ThinLTO but on Mac building/linking the self-extracting compressor binary doesn’t work if ThinLTO is on. This is quite weird, as a workaround restrict ThinLTO to Linux. ------- [185/187] Linking CXX static library base/glibc-compatibility/libglibc-compatibility.a [186/187] Linking CXX static library contrib/zstd-cmake/lib_zstd.a [187/187] Linking CXX executable utils/self-extracting-executable/pre_compressor -- Configuring done -- Generating done -- Build files have been written to: /home/ubuntu/repo/ch4/build [0/2] Re-checking globbed directories... 
[108/108] Linking CXX executable utils/self-extracting-executable/pre_compressor FAILED: utils/self-extracting-executable/pre_compressor : && /usr/bin/clang++-16 --target=x86_64-apple-darwin -std=c++20 -fdiagnostics-color=always -Xclang -fuse-ctor-homing -Wno-enum-constexpr-conversion -fsized-deallocation -gdwarf-aranges -pipe -mssse3 -msse4.1 -msse4.2 -mpclmul -mpopcnt -fasynchronous-unwind-tables -ffile-prefix-map=/home/ubuntu/repo/ch4=. -falign-functions=32 -mbranches-within-32B-boundaries -stdlib=libc++ -fdiagnostics-absolute-paths -fstrict -vtable-pointers -Wall -Wextra -Wframe-larger-than=65536 -Weverything -Wpedantic -Wno-zero-length-array -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-c++20-compat -Wno-sign-conversion -Wno-implicit-int-conversion -Wno-implicit-int-float-conversion -Wno-ctad-maybe-unsupported -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-double-promotion -Wno-exit-time-destructors -Wno-float-equal -Wn o-global-constructors -Wno-missing-prototypes -Wno-missing-variable-declarations -Wno-padded -Wno-switch-enum -Wno-undefined-func-template -Wno-unused-template -Wno-vla -Wno-weak-template-vtables -Wno-weak-vtables -Wno-thread-safety-negative -Wno-enum-constexpr-conversion -Wno-unsafe-buffer-usage -O2 -g -DNDEBUG -O3 -g -gdwarf-4 -flto=thin -fwhole-program-vtables -isysroot /home/ubuntu/repo/ch4/cmake/darwin/.. /toolchain/darwin-x86_64 -mmacosx-version-min=10.15 -Wl,-headerpad_max_install_names --ld-path=/home/ubuntu/cctools/bin/x86_64-apple-darwin-ld -rdynamic -Wl,-U,_inside_main -flto=thin -fwhole-program-vtables utils/self-extracting-executable/CMakeFiles/pre_compressor.dir/compressor.cpp.o -o utils/self-extracting-executable/pre_compressor contrib/zstd-cmake/lib_zstd.a contrib/libcxx-cmake/libcxx.a contrib/lib cxxabi-cmake/libcxxabi.a -nodefaultlibs -lc -lm -lpthread -ldl && : clang: warning: argument unused during compilation: '-stdlib=libc++' [-Wunused-command-line-argument] ld: warning: ignoring file utils/self-extracting-executable/CMakeFiles/pre_compressor.dir/compressor.cpp.o, building for macOS-x86_64 but attempting to link with file built for unknown-unsupported file format ( 0xDE 0xC0 0x17 0x0B 0x00 0x00 0x00 0x00 0x14 0x00 0x00 0x00 0x88 0x3E 0x03 0x00 ) ld: warning: ignoring file contrib/zstd-cmake/lib_zstd.a, building for macOS-x86_64 but attempting to link with file built for macOS-x86_64 ld: warning: ignoring file contrib/libcxxabi-cmake/libcxxabi.a, building for macOS-x86_64 but attempting to link with file built for unknown-unsupported file format ( 0x21 0x3C 0x61 0x72 0x63 0x68 0x3E 0x0A 0x23 0x31 0x2F 0x31 0x32 0x20 0x20 0x20 ) ld: warning: ignoring file contrib/libcxx-cmake/libcxx.a, building for macOS-x86_64 but attempting to link with file built for unknown-unsupported file format ( 0x21 0x3C 0x61 0x72 0x63 0x68 0x3E 0x0A 0x23 0x31 0x2F 0x31 0x32 0x20 0x20 0x20 ) Undefined symbols for architecture x86_64: "_main", referenced from: implicit entry/start for main executable ld: symbol(s) not found for architecture x86_64 clang: error: linker command failed with exit code 1 (use -v to see invocation) ninja: build stopped: subcommand failed. 
--- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5d6ed75bb296..06ee98b5ee1d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -344,9 +344,9 @@ if (COMPILER_CLANG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths") - if (NOT ENABLE_TESTS AND NOT SANITIZE) + if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX) # https://clang.llvm.org/docs/ThinLTO.html - # Applies to clang only. + # Applies to clang and linux only. # Disabled when building with tests or sanitizers. option(ENABLE_THINLTO "Clang-specific link time optimization" ON) endif() From 28332076054cc77660a4dbc3e13dcea1999a6342 Mon Sep 17 00:00:00 2001 From: Dmitry Kardymon Date: Thu, 6 Jul 2023 13:09:49 +0000 Subject: [PATCH 118/179] Edit tests to test last commit --- tests/queries/0_stateless/00301_csv.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index 776bd39fc03d..80053c99a17a 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -44,7 +44,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv"; echo === Test ignore extra columns $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64 DEFAULT 3, d String DEFAULT 'String4') ENGINE = Memory"; -echo '"Hello", 1, "String1" +echo '"Hello", 1, "String1" "Hello", 2, "String2", "Hello", 3, "String3", "2016-01-13" "Hello", 4, , "2016-01-14" From 7644f0b37c88cd924f20ecec4acc599e50491423 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 6 Jul 2023 14:44:06 +0000 Subject: [PATCH 119/179] Cosmetics: move code around --- src/IO/VarInt.h | 222 ++++++++++++++++++++++-------------------------- 1 file changed, 102 insertions(+), 120 deletions(-) diff --git a/src/IO/VarInt.h b/src/IO/VarInt.h index f6441391c8fc..a88347d68eb1 100644 --- a/src/IO/VarInt.h +++ b/src/IO/VarInt.h @@ -12,93 +12,93 @@ namespace DB /// Variable-Length Quantity (VLQ) Base-128 compression, also known as Variable Byte (VB) or Varint encoding. -/// Write UInt64 in variable length format (base128) -void writeVarUInt(UInt64 x, std::ostream & ostr); -void writeVarUInt(UInt64 x, WriteBuffer & ostr); -char * writeVarUInt(UInt64 x, char * ostr); - -/// Read UInt64, written in variable length format (base128) -void readVarUInt(UInt64 & x, std::istream & istr); -void readVarUInt(UInt64 & x, ReadBuffer & istr); -const char * readVarUInt(UInt64 & x, const char * istr, size_t size); - -/// Get the length of an variable-length-encoded integer -size_t getLengthOfVarUInt(UInt64 x); -size_t getLengthOfVarInt(Int64 x); - [[noreturn]] void throwReadAfterEOF(); [[noreturn]] void throwValueTooLargeForVarIntEncoding(UInt64 x); -/// Write Int64 in variable length format (base128) -template -inline void writeVarInt(Int64 x, Out & ostr) -{ - writeVarUInt(static_cast((x << 1) ^ (x >> 63)), ostr); -} -inline char * writeVarInt(Int64 x, char * ostr) +/// NOTE: Due to historical reasons, only values up to 1<<63-1 can be safely encoded/decoded (bigger values are not idempotent under +/// encoding/decoding). This cannot be changed without breaking backward compatibility (some drivers, e.g. clickhouse-rs (Rust), have the +/// same limitation, others support the full 1<<64 range, e.g. 
clickhouse-driver (Python)) +constexpr UInt64 VAR_UINT_MAX = (1ULL<<63) - 1; + +inline void writeVarUInt(UInt64 x, WriteBuffer & ostr) { - return writeVarUInt(static_cast((x << 1) ^ (x >> 63)), ostr); -} + if (x > VAR_UINT_MAX) [[unlikely]] + throwValueTooLargeForVarIntEncoding(x); + for (size_t i = 0; i < 9; ++i) + { + uint8_t byte = x & 0x7F; + if (x > 0x7F) + byte |= 0x80; -/// Read Int64, written in variable length format (base128) -template -inline void readVarInt(Int64 & x, In & istr) -{ - readVarUInt(*reinterpret_cast(&x), istr); - x = (static_cast(x) >> 1) ^ -(x & 1); + ostr.nextIfAtEnd(); + *ostr.position() = byte; + ++ostr.position(); + + x >>= 7; + if (!x) + return; + } } -inline const char * readVarInt(Int64 & x, const char * istr, size_t size) +inline void writeVarUInt(UInt64 x, std::ostream & ostr) { - const char * res = readVarUInt(*reinterpret_cast(&x), istr, size); - x = (static_cast(x) >> 1) ^ -(x & 1); - return res; -} + if (x > VAR_UINT_MAX) [[unlikely]] + throwValueTooLargeForVarIntEncoding(x); + for (size_t i = 0; i < 9; ++i) + { + uint8_t byte = x & 0x7F; + if (x > 0x7F) + byte |= 0x80; -/// For [U]Int32, [U]Int16, size_t. + ostr.put(byte); -inline void readVarUInt(UInt32 & x, ReadBuffer & istr) -{ - UInt64 tmp; - readVarUInt(tmp, istr); - x = static_cast(tmp); + x >>= 7; + if (!x) + return; + } } -inline void readVarInt(Int32 & x, ReadBuffer & istr) +inline char * writeVarUInt(UInt64 x, char * ostr) { - Int64 tmp; - readVarInt(tmp, istr); - x = static_cast(tmp); + if (x > VAR_UINT_MAX) [[unlikely]] + throwValueTooLargeForVarIntEncoding(x); + + for (size_t i = 0; i < 9; ++i) + { + uint8_t byte = x & 0x7F; + if (x > 0x7F) + byte |= 0x80; + + *ostr = byte; + ++ostr; + + x >>= 7; + if (!x) + return ostr; + } + + return ostr; } -inline void readVarUInt(UInt16 & x, ReadBuffer & istr) +template +inline void writeVarInt(Int64 x, Out & ostr) { - UInt64 tmp; - readVarUInt(tmp, istr); - x = tmp; + writeVarUInt(static_cast((x << 1) ^ (x >> 63)), ostr); } -inline void readVarInt(Int16 & x, ReadBuffer & istr) +inline char * writeVarInt(Int64 x, char * ostr) { - Int64 tmp; - readVarInt(tmp, istr); - x = tmp; + return writeVarUInt(static_cast((x << 1) ^ (x >> 63)), ostr); } -template -requires (!std::is_same_v) -inline void readVarUInt(T & x, ReadBuffer & istr) +namespace impl { - UInt64 tmp; - readVarUInt(tmp, istr); - x = tmp; -} template -inline void readVarUIntImpl(UInt64 & x, ReadBuffer & istr) +inline void readVarUInt(UInt64 & x, ReadBuffer & istr) { x = 0; for (size_t i = 0; i < 9; ++i) @@ -116,14 +116,15 @@ inline void readVarUIntImpl(UInt64 & x, ReadBuffer & istr) } } +} + inline void readVarUInt(UInt64 & x, ReadBuffer & istr) { if (istr.buffer().end() - istr.position() >= 9) - return readVarUIntImpl(x, istr); - return readVarUIntImpl(x, istr); + return impl::readVarUInt(x, istr); + return impl::readVarUInt(x, istr); } - inline void readVarUInt(UInt64 & x, std::istream & istr) { x = 0; @@ -158,75 +159,56 @@ inline const char * readVarUInt(UInt64 & x, const char * istr, size_t size) return istr; } -/// NOTE: Due to historical reasons, only values up to 1<<63-1 can be safely encoded/decoded (bigger values are not idempotent under -/// encoding/decoding). This cannot be changed without breaking backward compatibility (some drivers, e.g. clickhouse-rs (Rust), have the -/// same limitation, others support the full 1<<64 range, e.g. 
clickhouse-driver (Python)) -constexpr UInt64 VAR_UINT_MAX = (1ULL<<63) - 1; - -inline void writeVarUInt(UInt64 x, WriteBuffer & ostr) +template +inline void readVarInt(Int64 & x, In & istr) { - if (x > VAR_UINT_MAX) [[unlikely]] - throwValueTooLargeForVarIntEncoding(x); - - for (size_t i = 0; i < 9; ++i) - { - uint8_t byte = x & 0x7F; - if (x > 0x7F) - byte |= 0x80; - - ostr.nextIfAtEnd(); - *ostr.position() = byte; - ++ostr.position(); - - x >>= 7; - if (!x) - return; - } + readVarUInt(*reinterpret_cast(&x), istr); + x = (static_cast(x) >> 1) ^ -(x & 1); } - -inline void writeVarUInt(UInt64 x, std::ostream & ostr) +inline const char * readVarInt(Int64 & x, const char * istr, size_t size) { - if (x > VAR_UINT_MAX) [[unlikely]] - throwValueTooLargeForVarIntEncoding(x); - - for (size_t i = 0; i < 9; ++i) - { - uint8_t byte = x & 0x7F; - if (x > 0x7F) - byte |= 0x80; - - ostr.put(byte); - - x >>= 7; - if (!x) - return; - } + const char * res = readVarUInt(*reinterpret_cast(&x), istr, size); + x = (static_cast(x) >> 1) ^ -(x & 1); + return res; } - -inline char * writeVarUInt(UInt64 x, char * ostr) +inline void readVarUInt(UInt32 & x, ReadBuffer & istr) { - if (x > VAR_UINT_MAX) [[unlikely]] - throwValueTooLargeForVarIntEncoding(x); - - for (size_t i = 0; i < 9; ++i) - { - uint8_t byte = x & 0x7F; - if (x > 0x7F) - byte |= 0x80; + UInt64 tmp; + readVarUInt(tmp, istr); + x = static_cast(tmp); +} - *ostr = byte; - ++ostr; +inline void readVarInt(Int32 & x, ReadBuffer & istr) +{ + Int64 tmp; + readVarInt(tmp, istr); + x = static_cast(tmp); +} - x >>= 7; - if (!x) - return ostr; - } +inline void readVarUInt(UInt16 & x, ReadBuffer & istr) +{ + UInt64 tmp; + readVarUInt(tmp, istr); + x = tmp; +} - return ostr; +inline void readVarInt(Int16 & x, ReadBuffer & istr) +{ + Int64 tmp; + readVarInt(tmp, istr); + x = tmp; } +template +requires (!std::is_same_v) +inline void readVarUInt(T & x, ReadBuffer & istr) +{ + UInt64 tmp; + readVarUInt(tmp, istr); + x = tmp; +} inline size_t getLengthOfVarUInt(UInt64 x) { From 3f744c1e14ba7350c2dab4a8ccf145c26762f0c3 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 6 Jul 2023 14:47:40 +0000 Subject: [PATCH 120/179] Cosmetics: rename template parameter --- src/IO/VarInt.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/IO/VarInt.h b/src/IO/VarInt.h index a88347d68eb1..9099b5e7f6ab 100644 --- a/src/IO/VarInt.h +++ b/src/IO/VarInt.h @@ -97,13 +97,13 @@ inline char * writeVarInt(Int64 x, char * ostr) namespace impl { -template +template inline void readVarUInt(UInt64 & x, ReadBuffer & istr) { x = 0; for (size_t i = 0; i < 9; ++i) { - if constexpr (!fast) + if constexpr (check_eof) if (istr.eof()) [[unlikely]] throwReadAfterEOF(); @@ -121,8 +121,8 @@ inline void readVarUInt(UInt64 & x, ReadBuffer & istr) inline void readVarUInt(UInt64 & x, ReadBuffer & istr) { if (istr.buffer().end() - istr.position() >= 9) - return impl::readVarUInt(x, istr); - return impl::readVarUInt(x, istr); + return impl::readVarUInt(x, istr); + return impl::readVarUInt(x, istr); } inline void readVarUInt(UInt64 & x, std::istream & istr) From abf36065b7bbddeba2b80f76ad966a9167852089 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 6 Jul 2023 17:24:06 +0200 Subject: [PATCH 121/179] fix --- .../ReplicatedMergeTreePartCheckThread.cpp | 89 ++++++++++--------- .../ReplicatedMergeTreePartCheckThread.h | 4 +- src/Storages/StorageReplicatedMergeTree.cpp | 17 +++- .../__init__.py | 0 .../configs/testkeeper.xml | 6 -- .../test.py | 65 -------------- 
.../02254_projection_broken_part.reference | 6 ++ .../02254_projection_broken_part.sh | 44 +++++++++ 8 files changed, 115 insertions(+), 116 deletions(-) delete mode 100644 tests/integration/test_projection_report_broken_part/__init__.py delete mode 100644 tests/integration/test_projection_report_broken_part/configs/testkeeper.xml delete mode 100644 tests/integration/test_projection_report_broken_part/test.py create mode 100644 tests/queries/0_stateless/02254_projection_broken_part.reference create mode 100755 tests/queries/0_stateless/02254_projection_broken_part.sh diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 1cc3736bd2e8..ffe3f883f80d 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -63,6 +63,7 @@ void ReplicatedMergeTreePartCheckThread::enqueuePart(const String & name, time_t if (parts_set.contains(name)) return; + LOG_TRACE(log, "Enqueueing {} for check after after {}s", name, delay_to_check_seconds); parts_queue.emplace_back(name, time(nullptr) + delay_to_check_seconds); parts_set.insert(name); task->schedule(); @@ -423,7 +424,7 @@ ReplicatedCheckResult ReplicatedMergeTreePartCheckThread::checkPartImpl(const St } -CheckResult ReplicatedMergeTreePartCheckThread::checkPartAndFix(const String & part_name) +CheckResult ReplicatedMergeTreePartCheckThread::checkPartAndFix(const String & part_name, std::optional * recheck_after) { LOG_INFO(log, "Checking part {}", part_name); ProfileEvents::increment(ProfileEvents::ReplicatedPartChecks); @@ -438,7 +439,11 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPartAndFix(const String & p break; case ReplicatedCheckResult::RecheckLater: - enqueuePart(part_name, result.recheck_after); + /// NOTE We cannot enqueue it from the check thread itself + if (recheck_after) + *recheck_after = result.recheck_after; + else + enqueuePart(part_name, result.recheck_after); break; case ReplicatedCheckResult::DetachUnexpected: @@ -471,10 +476,22 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPartAndFix(const String & p /// Part is not in ZooKeeper and not on disk (so there's nothing to detach or remove from ZooKeeper). /// Probably we cannot execute some entry from the replication queue (so don't need to enqueue another one). - /// Either all replicas having the part are not active, or the part is lost forever. + /// Either all replicas having the part are not active... bool found_something = searchForMissingPartOnOtherReplicas(part_name); - if (!found_something) - onPartIsLostForever(part_name); + if (found_something) + break; + + /// ... 
or the part is lost forever + bool handled_lost_part = onPartIsLostForever(part_name); + if (handled_lost_part) + break; + + /// We failed to create empty part, need retry + constexpr time_t retry_after_seconds = 30; + if (recheck_after) + *recheck_after = retry_after_seconds; + else + enqueuePart(part_name, retry_after_seconds); break; } @@ -483,7 +500,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPartAndFix(const String & p return result.status; } -void ReplicatedMergeTreePartCheckThread::onPartIsLostForever(const String & part_name) +bool ReplicatedMergeTreePartCheckThread::onPartIsLostForever(const String & part_name) { auto lost_part_info = MergeTreePartInfo::fromPartName(part_name, storage.format_version); if (lost_part_info.level != 0 || lost_part_info.mutation != 0) @@ -499,7 +516,7 @@ void ReplicatedMergeTreePartCheckThread::onPartIsLostForever(const String & part for (const String & source_part_name : source_parts) enqueuePart(source_part_name); - return; + return true; } } @@ -512,13 +529,11 @@ void ReplicatedMergeTreePartCheckThread::onPartIsLostForever(const String & part */ LOG_ERROR(log, "Part {} is lost forever.", part_name); ProfileEvents::increment(ProfileEvents::ReplicatedDataLoss); + return true; } - else - { - LOG_WARNING(log, "Cannot create empty part {} instead of lost. Will retry later", part_name); - constexpr time_t retry_after_seconds = 30; - enqueuePart(part_name, retry_after_seconds); - } + + LOG_WARNING(log, "Cannot create empty part {} instead of lost. Will retry later", part_name); + return false; } @@ -533,42 +548,29 @@ void ReplicatedMergeTreePartCheckThread::run() /// Take part from the queue for verification. PartsToCheckQueue::iterator selected = parts_queue.end(); /// end from std::list is not get invalidated - time_t min_check_time = std::numeric_limits::max(); { std::lock_guard lock(parts_mutex); - if (parts_queue.empty()) + if (parts_queue.empty() && !parts_set.empty()) { - if (!parts_set.empty()) - { - parts_set.clear(); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Non-empty parts_set with empty parts_queue. This is a bug."); - } + parts_set.clear(); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Non-empty parts_set with empty parts_queue. This is a bug."); } - else + + selected = std::find_if(parts_queue.begin(), parts_queue.end(), [current_time](const auto & elem) { - for (auto it = parts_queue.begin(); it != parts_queue.end(); ++it) - { - if (it->second <= current_time) - { - selected = it; - break; - } - - if (it->second < min_check_time) - { - min_check_time = it->second; - selected = it; - } - } - } - } + return elem.second <= current_time; + }); + if (selected == parts_queue.end()) + return; - if (selected == parts_queue.end()) - return; + /// Move selected part to the end of the queue + parts_queue.splice(parts_queue.end(), parts_queue, selected); + } - checkPartAndFix(selected->first); + std::optional recheck_after; + checkPartAndFix(selected->first, &recheck_after); if (need_stop) return; @@ -581,6 +583,11 @@ void ReplicatedMergeTreePartCheckThread::run() { throw Exception(ErrorCodes::LOGICAL_ERROR, "Someone erased checking part from parts_queue. 
This is a bug."); } + else if (recheck_after.has_value()) + { + LOG_TRACE(log, "Will recheck part {} after after {}s", selected->first, *recheck_after); + selected->second = time(nullptr) + *recheck_after; + } else { parts_set.erase(selected->first); @@ -596,7 +603,7 @@ void ReplicatedMergeTreePartCheckThread::run() { tryLogCurrentException(log, __PRETTY_FUNCTION__); - if (e.code == Coordination::Error::ZSESSIONEXPIRED) + if (Coordination::isHardwareError(e.code)) return; task->scheduleAfter(PART_CHECK_ERROR_SLEEP_MS); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h index 0a8fbc75c059..fc76cbad4ed4 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h @@ -65,7 +65,7 @@ class ReplicatedMergeTreePartCheckThread size_t size() const; /// Check part by name - CheckResult checkPartAndFix(const String & part_name); + CheckResult checkPartAndFix(const String & part_name, std::optional * recheck_after = nullptr); ReplicatedCheckResult checkPartImpl(const String & part_name); @@ -77,7 +77,7 @@ class ReplicatedMergeTreePartCheckThread private: void run(); - void onPartIsLostForever(const String & part_name); + bool onPartIsLostForever(const String & part_name); std::pair findLocalPart(const String & part_name); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index ff319e47946d..e8176ac1d5f4 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3366,6 +3366,10 @@ bool StorageReplicatedMergeTree::canExecuteFetch(const ReplicatedMergeTreeLogEnt { disable_reason = fmt::format("Not executing fetch of part {} because we still have broken part with that name. 
" "Waiting for the broken part to be removed first.", entry.new_part_name); + + constexpr time_t min_interval_to_wakeup_cleanup_s = 30; + if (entry.last_postpone_time + min_interval_to_wakeup_cleanup_s < time(nullptr)) + const_cast(this)->cleanup_thread.wakeup(); return false; } } @@ -3753,11 +3757,13 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n DataPartPtr broken_part; auto outdate_broken_part = [this, &broken_part]() { - if (broken_part) + if (!broken_part) return; DataPartsLock lock = lockParts(); if (broken_part->getState() == DataPartState::Active) removePartsFromWorkingSet(NO_TRANSACTION_RAW, {broken_part}, true, &lock); + broken_part.reset(); + cleanup_thread.wakeup(); }; /// We don't know exactly what happened to broken part @@ -3767,6 +3773,7 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n auto partition_range = getDataPartsVectorInPartitionForInternalUsage({MergeTreeDataPartState::Active, MergeTreeDataPartState::Outdated}, broken_part_info.partition_id); + Strings detached_parts; for (const auto & part : partition_range) { if (!broken_part_info.contains(part->info)) @@ -3784,7 +3791,9 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n { part->makeCloneInDetached("covered-by-broken", getInMemoryMetadataPtr()); } + detached_parts.push_back(part->name); } + LOG_WARNING(log, "Detached {} parts covered by broken part {}: {}", detached_parts.size(), part_name, fmt::join(detached_parts, ", ")); ThreadFuzzer::maybeInjectSleep(); ThreadFuzzer::maybeInjectMemoryLimitException(); @@ -3873,10 +3882,14 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n zkutil::KeeperMultiException::check(rc, ops, results); + String path_created = dynamic_cast(*results.back()).path_created; + log_entry->znode_name = path_created.substr(path_created.find_last_of('/') + 1); + LOG_DEBUG(log, "Created entry {} to fetch missing part {}", log_entry->znode_name, part_name); + queue.insert(zookeeper, log_entry); + /// Make the part outdated after creating the log entry. 
/// Otherwise, if we failed to create the entry, cleanup thread could remove the part from ZooKeeper (leading to diverged replicas) outdate_broken_part(); - queue_updating_task->schedule(); return; } } diff --git a/tests/integration/test_projection_report_broken_part/__init__.py b/tests/integration/test_projection_report_broken_part/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/integration/test_projection_report_broken_part/configs/testkeeper.xml b/tests/integration/test_projection_report_broken_part/configs/testkeeper.xml deleted file mode 100644 index 617371b13fa3..000000000000 --- a/tests/integration/test_projection_report_broken_part/configs/testkeeper.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - testkeeper - - diff --git a/tests/integration/test_projection_report_broken_part/test.py b/tests/integration/test_projection_report_broken_part/test.py deleted file mode 100644 index f376adf4f1a8..000000000000 --- a/tests/integration/test_projection_report_broken_part/test.py +++ /dev/null @@ -1,65 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name -# pylint: disable=line-too-long - -import pytest -import time - -from helpers.client import QueryRuntimeException -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -node = cluster.add_instance( - "node", - main_configs=[ - "configs/testkeeper.xml", - ], -) - - -@pytest.fixture(scope="module", autouse=True) -def start_cluster(): - try: - cluster.start() - yield cluster - finally: - cluster.shutdown() - - -def test_projection_broken_part(): - node.query( - """ - create table test_projection_broken_parts_1 (a int, b int, projection ab (select a, sum(b) group by a)) - engine = ReplicatedMergeTree('/clickhouse-tables/test_projection_broken_parts', 'r1') - order by a settings index_granularity = 1; - - create table test_projection_broken_parts_2 (a int, b int, projection ab (select a, sum(b) group by a)) - engine ReplicatedMergeTree('/clickhouse-tables/test_projection_broken_parts', 'r2') - order by a settings index_granularity = 1; - - insert into test_projection_broken_parts_1 values (1, 1), (1, 2), (1, 3); - - system sync replica test_projection_broken_parts_2; - """ - ) - - # break projection part - node.exec_in_container( - [ - "bash", - "-c", - "rm /var/lib/clickhouse/data/default/test_projection_broken_parts_1/all_0_0_0/ab.proj/data.bin", - ] - ) - - expected_error = "No such file or directory" - assert expected_error in node.query_and_get_error( - "select sum(b) from test_projection_broken_parts_1 group by a" - ) - - time.sleep(2) - - assert ( - int(node.query("select sum(b) from test_projection_broken_parts_1 group by a")) - == 6 - ) diff --git a/tests/queries/0_stateless/02254_projection_broken_part.reference b/tests/queries/0_stateless/02254_projection_broken_part.reference new file mode 100644 index 000000000000..68538fd31eaf --- /dev/null +++ b/tests/queries/0_stateless/02254_projection_broken_part.reference @@ -0,0 +1,6 @@ +1 1 1 all_0_0_0 +1 1 2 all_0_0_0 +1 1 3 all_0_0_0 +2 6 +0 +5 6 diff --git a/tests/queries/0_stateless/02254_projection_broken_part.sh b/tests/queries/0_stateless/02254_projection_broken_part.sh new file mode 100755 index 000000000000..d276c67f8de2 --- /dev/null +++ b/tests/queries/0_stateless/02254_projection_broken_part.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, zookeeper + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" +$CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" + +$CLICKHOUSE_CLIENT -q "create table projection_broken_parts_1 (a int, b int, projection ab (select a, sum(b) group by a)) + engine = ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r1') + order by a settings index_granularity = 1;" + +$CLICKHOUSE_CLIENT -q "create table projection_broken_parts_2 (a int, b int, projection ab (select a, sum(b) group by a)) + engine = ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r2') + order by a settings index_granularity = 1;" + +$CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "insert into projection_broken_parts_1 values (1, 1), (1, 2), (1, 3);" +$CLICKHOUSE_CLIENT -q "system sync replica projection_broken_parts_2;" +$CLICKHOUSE_CLIENT -q "select 1, *, _part from projection_broken_parts_2 order by b;" +$CLICKHOUSE_CLIENT -q "select 2, sum(b) from projection_broken_parts_2 group by a;" + +path=$($CLICKHOUSE_CLIENT -q "select path from system.parts where database='$CLICKHOUSE_DATABASE' and table='projection_broken_parts_1' and name='all_0_0_0'") +# ensure that path is absolute before removing +$CLICKHOUSE_CLIENT -q "select throwIf(substring('$path', 1, 1) != '/', 'Path is relative: $path')" || exit +rm -f "$path/ab.proj/data.bin" + +$CLICKHOUSE_CLIENT -q "select 3, sum(b) from projection_broken_parts_1 group by a;" 2>/dev/null + +num_tries=0 +while ! $CLICKHOUSE_CLIENT -q "select 4, sum(b) from projection_broken_parts_1 group by a format Null;" 2>/dev/null; do + sleep 1; + num_tries=$((num_tries+1)) + if [ $num_tries -eq 60 ]; then + break + fi +done + +$CLICKHOUSE_CLIENT -q "system sync replica projection_broken_parts_1;" +$CLICKHOUSE_CLIENT -q "select 5, sum(b) from projection_broken_parts_1 group by a;" + +$CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" +$CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" From 63b9c1ac0670947b49a916b5b6e47cab1dd1d3d0 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 6 Jul 2023 18:58:13 +0200 Subject: [PATCH 122/179] add test --- src/Common/ProfileEvents.cpp | 2 ++ src/IO/ReadWriteBufferFromHTTP.cpp | 6 ++++ ...ing_from_s3_with_connection_pool.reference | 1 + ...89_reading_from_s3_with_connection_pool.sh | 32 ++++++++++++++++++- 4 files changed, 40 insertions(+), 1 deletion(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index c9030070bf2f..3bee12731aac 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -371,6 +371,8 @@ The server successfully detected this situation and will download merged part fr M(ReadBufferFromS3ResetSessions, "Number of HTTP sessions that were reset in ReadBufferFromS3.") \ M(ReadBufferFromS3PreservedSessions, "Number of HTTP sessions that were preserved in ReadBufferFromS3.") \ \ + M(ReadWriteBufferFromHTTPPreservedSessions, "Number of HTTP sessions that were preserved in ReadWriteBufferFromHTTP.") \ + \ M(WriteBufferFromS3Microseconds, "Time spent on writing to S3.") \ M(WriteBufferFromS3Bytes, "Bytes written to S3.") \ M(WriteBufferFromS3RequestsErrors, "Number of exceptions while writing to S3.") \ diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index b834c17ab6cf..6d1c0f7aafa1 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -5,6 +5,7 @@ 
namespace ProfileEvents { extern const Event ReadBufferSeekCancelConnection; +extern const Event ReadWriteBufferFromHTTPPreservedSessions; } namespace DB @@ -442,6 +443,7 @@ bool ReadWriteBufferFromHTTPBase::nextImpl() { /// Response was fully read. markSessionForReuse(session->getSession()); + ProfileEvents::increment(ProfileEvents::ReadWriteBufferFromHTTPPreservedSessions); return false; } @@ -568,6 +570,7 @@ bool ReadWriteBufferFromHTTPBase::nextImpl() { /// Eof is reached, i.e response was fully read. markSessionForReuse(session->getSession()); + ProfileEvents::increment(ProfileEvents::ReadWriteBufferFromHTTPPreservedSessions); return false; } @@ -623,8 +626,11 @@ size_t ReadWriteBufferFromHTTPBase::readBigAt(char * to, si size_t r = copyFromIStreamWithProgressCallback(*result_istr, to, n, progress_callback, &cancelled); if (!cancelled) + { /// Response was fully read. markSessionForReuse(sess); + ProfileEvents::increment(ProfileEvents::ReadWriteBufferFromHTTPPreservedSessions); + } return r; } diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference index d00491fd7e5b..6ed281c757a9 100644 --- a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.reference @@ -1 +1,2 @@ 1 +1 diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh index 7a8b94a10a89..ce90157d0049 100755 --- a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, no-random-settings +# Tags: no-fasttest, no-random-settings, no-replicated-database CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -27,3 +27,33 @@ WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query_id='$query_id'; " + + +# Test connection pool in ReadWriteBufferFromHTTP + +query_id=$(${CLICKHOUSE_CLIENT} -nq " +create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n; +set insert_keeper_fault_injection_probability=0; +insert into mut values (1, 2, 3), (10, 20, 30); + +system stop merges mut; +alter table mut delete where n = 10; + +select queryID() from( + -- a funny way to wait for a MUTATE_PART to be assigned + select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( + 'select 1 where ''MUTATE_PART'' not in (select type from system.replication_queue where database=''' || currentDatabase() || ''' and table=''mut'')' + ), 'LineAsString', 's String') + -- queryID() will be returned for each row, since the query above doesn't return anything we need to return a fake row + union all + select 1 +) limit 1 settings max_threads=1; +" 2>&1) +${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" +${CLICKHOUSE_CLIENT} -nm --query " +SELECT ProfileEvents['ReadWriteBufferFromHTTPPreservedSessions'] > 0 +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query_id='$query_id'; +" From 45579417642c44956ebe329b5412bcbb48809d72 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Thu, 6 Jul 2023 17:03:43 +0000 Subject: [PATCH 123/179] black --- 
tests/sqllogic/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/sqllogic/connection.py b/tests/sqllogic/connection.py index 5e2634787d84..d71cc005d093 100644 --- a/tests/sqllogic/connection.py +++ b/tests/sqllogic/connection.py @@ -62,7 +62,7 @@ def default_clickhouse_odbc_conn_str(): return str( OdbcConnectingArgs.create_from_kw( dsn="ClickHouse DSN (ANSI)", - Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree" + Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree", ) ) From 58793816a73b7b17eb72c35f0266276bc40507b4 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 6 Jul 2023 19:04:34 +0200 Subject: [PATCH 124/179] fix paranoid check --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- .../MergeTree/ReplicatedMergeTreeQueue.cpp | 3 ++ src/Storages/StorageReplicatedMergeTree.cpp | 15 ++++--- src/Storages/StorageReplicatedMergeTree.h | 2 + .../02254_projection_broken_part.sh | 4 +- ...2255_broken_parts_chain_on_start.reference | 8 ++++ .../02255_broken_parts_chain_on_start.sh | 43 +++++++++++++++++++ 7 files changed, 68 insertions(+), 9 deletions(-) create mode 100644 tests/queries/0_stateless/02255_broken_parts_chain_on_start.reference create mode 100755 tests/queries/0_stateless/02255_broken_parts_chain_on_start.sh diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 4dc3583c7063..b7fde55880e3 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2651,7 +2651,7 @@ size_t MergeTreeData::clearOldBrokenPartsFromDetachedDirectory() for (auto & [old_name, new_name, disk] : renamed_parts.old_and_new_names) { removeDetachedPart(disk, fs::path(relative_data_path) / "detached" / new_name / "", old_name); - LOG_DEBUG(log, "Removed broken detached part {} due to a timeout for broken detached parts", old_name); + LOG_WARNING(log, "Removed broken detached part {} due to a timeout for broken detached parts", old_name); old_name.clear(); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 792843cbe18a..07f46c07466f 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -218,6 +218,9 @@ void ReplicatedMergeTreeQueue::createLogEntriesToFetchBrokenParts() for (const auto & broken_part_name : broken_parts) storage.removePartAndEnqueueFetch(broken_part_name, /* storage_init = */true); + Strings parts_in_zk = storage.getZooKeeper()->getChildren(replica_path + "/parts"); + storage.paranoidCheckForCoveredPartsInZooKeeperOnStart(parts_in_zk, {}); + std::lock_guard lock(state_mutex); /// broken_parts_to_enqueue_fetches_on_loading can be assigned only once on table startup, /// so actually no race conditions are possible diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index e8176ac1d5f4..2da18f69bafb 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1260,8 +1260,7 @@ static time_t tryGetPartCreateTime(zkutil::ZooKeeperPtr & zookeeper, const Strin return res; } -static void paranoidCheckForCoveredPartsInZooKeeperOnStart(const StorageReplicatedMergeTree * storage, const Strings & parts_in_zk, - MergeTreeDataFormatVersion format_version, Poco::Logger * log) +void 
StorageReplicatedMergeTree::paranoidCheckForCoveredPartsInZooKeeperOnStart(const Strings & parts_in_zk, const Strings & parts_to_fetch) const { #ifdef ABORT_ON_LOGICAL_ERROR constexpr bool paranoid_check_for_covered_parts_default = true; @@ -1275,15 +1274,15 @@ static void paranoidCheckForCoveredPartsInZooKeeperOnStart(const StorageReplicat return; /// FIXME https://github.com/ClickHouse/ClickHouse/issues/51182 - if (storage->getSettings()->use_metadata_cache) + if (getSettings()->use_metadata_cache) return; ActiveDataPartSet active_set(format_version); for (const auto & part_name : parts_in_zk) active_set.add(part_name); - const auto disks = storage->getStoragePolicy()->getDisks(); - auto path = storage->getRelativeDataPath(); + const auto disks = getStoragePolicy()->getDisks(); + auto path = getRelativeDataPath(); for (const auto & part_name : parts_in_zk) { @@ -1296,6 +1295,9 @@ static void paranoidCheckForCoveredPartsInZooKeeperOnStart(const StorageReplicat if (disk->exists(fs::path(path) / part_name)) found = true; + if (!found) + found = std::find(parts_to_fetch.begin(), parts_to_fetch.end(), part_name) != parts_to_fetch.end(); + if (!found) { LOG_WARNING(log, "Part {} exists in ZooKeeper and covered by another part in ZooKeeper ({}), but doesn't exist on any disk. " @@ -1310,7 +1312,6 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks) auto zookeeper = getZooKeeper(); Strings expected_parts_vec = zookeeper->getChildren(fs::path(replica_path) / "parts"); - paranoidCheckForCoveredPartsInZooKeeperOnStart(this, expected_parts_vec, format_version, log); /// Parts in ZK. NameSet expected_parts(expected_parts_vec.begin(), expected_parts_vec.end()); @@ -1345,6 +1346,8 @@ void StorageReplicatedMergeTree::checkParts(bool skip_sanity_checks) if (!getActiveContainingPart(missing_name)) parts_to_fetch.push_back(missing_name); + paranoidCheckForCoveredPartsInZooKeeperOnStart(expected_parts_vec, parts_to_fetch); + /** To check the adequacy, for the parts that are in the FS, but not in ZK, we will only consider not the most recent parts. * Because unexpected new parts usually arise only because they did not have time to enroll in ZK with a rough restart of the server. * It also occurs from deduplicated parts that did not have time to retire. diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index bdd3f0da5bff..72a022fce267 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -584,6 +584,8 @@ class StorageReplicatedMergeTree final : public MergeTreeData void forcefullyRemoveBrokenOutdatedPartFromZooKeeperBeforeDetaching(const String & part_name) override; + void paranoidCheckForCoveredPartsInZooKeeperOnStart(const Strings & parts_in_zk, const Strings & parts_to_fetch) const; + /// Removes a part from ZooKeeper and adds a task to the queue to download it. It is supposed to do this with broken parts. 
void removePartAndEnqueueFetch(const String & part_name, bool storage_init); diff --git a/tests/queries/0_stateless/02254_projection_broken_part.sh b/tests/queries/0_stateless/02254_projection_broken_part.sh index d276c67f8de2..6ba5093f234e 100755 --- a/tests/queries/0_stateless/02254_projection_broken_part.sh +++ b/tests/queries/0_stateless/02254_projection_broken_part.sh @@ -9,11 +9,11 @@ $CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" $CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" $CLICKHOUSE_CLIENT -q "create table projection_broken_parts_1 (a int, b int, projection ab (select a, sum(b) group by a)) - engine = ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r1') + engine = ReplicatedMergeTree('/test/02254/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r1') order by a settings index_granularity = 1;" $CLICKHOUSE_CLIENT -q "create table projection_broken_parts_2 (a int, b int, projection ab (select a, sum(b) group by a)) - engine = ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r2') + engine = ReplicatedMergeTree('/test/02254/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r2') order by a settings index_granularity = 1;" $CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "insert into projection_broken_parts_1 values (1, 1), (1, 2), (1, 3);" diff --git a/tests/queries/0_stateless/02255_broken_parts_chain_on_start.reference b/tests/queries/0_stateless/02255_broken_parts_chain_on_start.reference new file mode 100644 index 000000000000..d55cb5baf936 --- /dev/null +++ b/tests/queries/0_stateless/02255_broken_parts_chain_on_start.reference @@ -0,0 +1,8 @@ +1 1 10 all_0_0_0_1 +1 1 20 all_0_0_0_1 +1 1 30 all_0_0_0_1 +0 +0 +1 1 10 all_0_0_0_1 +1 1 20 all_0_0_0_1 +1 1 30 all_0_0_0_1 diff --git a/tests/queries/0_stateless/02255_broken_parts_chain_on_start.sh b/tests/queries/0_stateless/02255_broken_parts_chain_on_start.sh new file mode 100755 index 000000000000..de260937b9cb --- /dev/null +++ b/tests/queries/0_stateless/02255_broken_parts_chain_on_start.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Tags: long, zookeeper + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "drop table if exists rmt1 sync;" +$CLICKHOUSE_CLIENT -q "drop table if exists rmt2 sync;" + +$CLICKHOUSE_CLIENT -q "create table rmt1 (a int, b int) + engine = ReplicatedMergeTree('/test/02255/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r1') order by a settings old_parts_lifetime=100500;" + +$CLICKHOUSE_CLIENT -q "create table rmt2 (a int, b int) + engine = ReplicatedMergeTree('/test/02255/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', 'r2') order by a settings old_parts_lifetime=100500;" + +$CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "insert into rmt1 values (1, 1), (1, 2), (1, 3);" +$CLICKHOUSE_CLIENT -q "alter table rmt1 update b = b*10 where 1 settings mutations_sync=1" +$CLICKHOUSE_CLIENT -q "system sync replica rmt2;" +$CLICKHOUSE_CLIENT -q "select 1, *, _part from rmt2 order by b;" + +path=$($CLICKHOUSE_CLIENT -q "select path from system.parts where database='$CLICKHOUSE_DATABASE' and table='rmt1' and name='all_0_0_0'") +# ensure that path is absolute before removing +$CLICKHOUSE_CLIENT -q "select throwIf(substring('$path', 1, 1) != '/', 'Path is relative: $path')" || exit +rm -f "$path/data.bin" + +path=$($CLICKHOUSE_CLIENT -q "select path from system.parts where database='$CLICKHOUSE_DATABASE' and table='rmt1' and name='all_0_0_0_1'") +# ensure that path is absolute before removing +$CLICKHOUSE_CLIENT -q "select throwIf(substring('$path', 1, 1) != '/', 'Path is relative: $path')" || exit +rm -f "$path/data.bin" + +$CLICKHOUSE_CLIENT -q "detach table rmt1 sync" +$CLICKHOUSE_CLIENT -q "attach table rmt1" 2>/dev/null + +$CLICKHOUSE_CLIENT -q "system sync replica rmt1;" +$CLICKHOUSE_CLIENT -q "select 1, *, _part from rmt1 order by b;" + +$CLICKHOUSE_CLIENT -q "truncate table rmt1" + +$CLICKHOUSE_CLIENT -q "SELECT table, lost_part_count FROM system.replicas WHERE database=currentDatabase() AND lost_part_count!=0"; + +$CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" +$CLICKHOUSE_CLIENT -q "drop table if exists projection_broken_parts_1 sync;" From 9c12994d942e48c112e9392738c561582f10bb0a Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 6 Jul 2023 19:49:22 +0200 Subject: [PATCH 125/179] initialize SeriesRecords for LogSeriesLimiter lazy --- src/Common/LoggingFormatStringHelpers.cpp | 29 +++++++++++------------ src/Common/LoggingFormatStringHelpers.h | 12 ++++++---- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/src/Common/LoggingFormatStringHelpers.cpp b/src/Common/LoggingFormatStringHelpers.cpp index ed578018d5f9..074c8dd28039 100644 --- a/src/Common/LoggingFormatStringHelpers.cpp +++ b/src/Common/LoggingFormatStringHelpers.cpp @@ -77,9 +77,8 @@ void LogFrequencyLimiterIml::cleanup(time_t too_old_threshold_s) } -std::unordered_map> LogSeriesLimiter::series_settings; -std::unordered_map> LogSeriesLimiter::series_loggers; std::mutex LogSeriesLimiter::mutex; +time_t LogSeriesLimiter::last_cleanup = 0; LogSeriesLimiter::LogSeriesLimiter(Poco::Logger * logger_, size_t allowed_count_, time_t interval_s_) : logger(logger_) @@ -101,33 +100,33 @@ LogSeriesLimiter::LogSeriesLimiter(Poco::Logger * logger_, size_t allowed_count_ std::lock_guard lock(mutex); - if (series_settings.contains(name_hash)) - { - auto & settings = series_settings[name_hash]; - auto & [allowed_count, interval_s] = settings; - chassert(allowed_count_ == allowed_count); - chassert(interval_s_ == interval_s); - } - else + if (last_cleanup == 0) + last_cleanup = now; + + auto & series_records = 
getSeriesRecords(); + + static const time_t cleanup_delay_s = 600; + if (last_cleanup + cleanup_delay_s >= now) { - series_settings[name_hash] = std::make_tuple(allowed_count_, interval_s_); + time_t old = now - cleanup_delay_s; + std::erase_if(series_records, [old](const auto & elem) { return get<0>(elem.second) < old; }); + last_cleanup = now; } auto register_as_first = [&] () TSA_REQUIRES(mutex) { assert(allowed_count_ > 0); accepted = true; - series_loggers[name_hash] = std::make_tuple(now, 1, 1); + series_records[name_hash] = std::make_tuple(now, 1, 1); }; - - if (!series_loggers.contains(name_hash)) + if (!series_records.contains(name_hash)) { register_as_first(); return; } - auto & [last_time, accepted_count, total_count] = series_loggers[name_hash]; + auto & [last_time, accepted_count, total_count] = series_records[name_hash]; if (last_time + interval_s_ <= now) { debug_message = fmt::format( diff --git a/src/Common/LoggingFormatStringHelpers.h b/src/Common/LoggingFormatStringHelpers.h index 82c260e52a68..3afa3fb089da 100644 --- a/src/Common/LoggingFormatStringHelpers.h +++ b/src/Common/LoggingFormatStringHelpers.h @@ -199,12 +199,16 @@ class LogFrequencyLimiterIml class LogSeriesLimiter { static std::mutex mutex; - - /// Hash(logger_name) -> (allowed_count, interval_s) - static std::unordered_map> series_settings TSA_GUARDED_BY(mutex); + static time_t last_cleanup; /// Hash(logger_name) -> (last_logged_time_s, accepted, muted) - static std::unordered_map> series_loggers TSA_GUARDED_BY(mutex); + using SeriesRecords = std::unordered_map>; + + static SeriesRecords & getSeriesRecords() TSA_REQUIRES(mutex) + { + static SeriesRecords records; + return records; + } Poco::Logger * logger = nullptr; bool accepted = false; From ec5e26a017c39eb4d76a1b07e4083cc53a225a5d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 6 Jul 2023 21:08:53 +0200 Subject: [PATCH 126/179] Pin rust nightly (to make it stable) Because of using Rust nightly, and without #49601 the Rust toolchain is very unstable, and can be frequently failed. So let's ping particular version. Also I've looked and it seems that Rust archives stores this archive without any TTL, since there is even a version for 2015 year. Follow-up for: #50541 Signed-off-by: Azat Khuzhin --- docker/packager/binary/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index e824161a688e..897bcd24d047 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -49,8 +49,8 @@ ENV CARGO_HOME=/rust/cargo ENV PATH="/rust/cargo/bin:${PATH}" RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ chmod 777 -R /rust && \ - rustup toolchain install nightly && \ - rustup default nightly && \ + rustup toolchain install nightly-2023-07-04 && \ + rustup default nightly-2023-07-04 && \ rustup component add rust-src && \ rustup target add aarch64-unknown-linux-gnu && \ rustup target add x86_64-apple-darwin && \ From 271297823ae6abe82908220d1a540fbf0113f4d8 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 6 Jul 2023 14:56:05 +0000 Subject: [PATCH 127/179] Allow var-int encoded 64-bit integers with MSB=1 Resolves: #51486 Until now, it was illegal to encode 64-bit (unsigned) integers with MSB=1, i.e. values > (1ULL<<63) - 1, as var-int. In more detail, the var-int code used by ClickHouse server and client spent at most 9 bytes per value such that 9 * 7 = 63 bits could be encoded. Some 3rd party clients (e.g. 
Rust clickhouse-rs) had the same limitation, whereas other clients understand the full range (Python clickhouse-driver). PRs #47608 and #48628 added sanity checks as asserts or exceptions during var-int encoding on the server side. This was considered okay as such huge integers so far occurred only during testing (usually fuzzing) but not in practice. Issue #51486 is a new fuzzing issue where the exception thrown from the sanity check led to a half-baked progress packet and as a result, a logical error / server crash. The only fix which is not another bandaid is to allow the full range in var-int coding. Clients will have to allow the full range too, a note will be added to the changelog. (the alternative was to create another protocol version but as var-int is used all over the place this was considered infeasible) Review note: this is the relevant commit. --- src/IO/VarInt.cpp | 9 --- src/IO/VarInt.h | 65 ++++++++----------- src/Server/TCPHandler.cpp | 7 +- .../0_stateless/02812_large_varints.reference | 0 .../0_stateless/02812_large_varints.sql | 4 ++ 5 files changed, 34 insertions(+), 51 deletions(-) create mode 100644 tests/queries/0_stateless/02812_large_varints.reference create mode 100644 tests/queries/0_stateless/02812_large_varints.sql diff --git a/src/IO/VarInt.cpp b/src/IO/VarInt.cpp index ca4b95fcb60f..a4b249b01d76 100644 --- a/src/IO/VarInt.cpp +++ b/src/IO/VarInt.cpp @@ -6,7 +6,6 @@ namespace DB namespace ErrorCodes { extern const int ATTEMPT_TO_READ_AFTER_EOF; - extern const int BAD_ARGUMENTS; } void throwReadAfterEOF() @@ -14,12 +13,4 @@ void throwReadAfterEOF() throw Exception(ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF, "Attempt to read after eof"); } -void throwValueTooLargeForVarIntEncoding(UInt64 x) -{ - /// Under practical circumstances, we should virtually never end up here but AST Fuzzer manages to create superlarge input integers - /// which trigger this exception. Intentionally not throwing LOGICAL_ERROR or calling abort() or [ch]assert(false), so AST Fuzzer - /// can swallow the exception and continue to run. - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Value {} is too large for VarInt encoding", x); -} - } diff --git a/src/IO/VarInt.h b/src/IO/VarInt.h index 9099b5e7f6ab..2a2743e34073 100644 --- a/src/IO/VarInt.h +++ b/src/IO/VarInt.h @@ -13,73 +13,59 @@ namespace DB /// Variable-Length Quantity (VLQ) Base-128 compression, also known as Variable Byte (VB) or Varint encoding. [[noreturn]] void throwReadAfterEOF(); -[[noreturn]] void throwValueTooLargeForVarIntEncoding(UInt64 x); -/// NOTE: Due to historical reasons, only values up to 1<<63-1 can be safely encoded/decoded (bigger values are not idempotent under -/// encoding/decoding). This cannot be changed without breaking backward compatibility (some drivers, e.g. clickhouse-rs (Rust), have the -/// same limitation, others support the full 1<<64 range, e.g. 
clickhouse-driver (Python)) -constexpr UInt64 VAR_UINT_MAX = (1ULL<<63) - 1; - inline void writeVarUInt(UInt64 x, WriteBuffer & ostr) { - if (x > VAR_UINT_MAX) [[unlikely]] - throwValueTooLargeForVarIntEncoding(x); - - for (size_t i = 0; i < 9; ++i) + while (x > 0x7F) { - uint8_t byte = x & 0x7F; - if (x > 0x7F) - byte |= 0x80; + uint8_t byte = 0x80 | (x & 0x7F); ostr.nextIfAtEnd(); *ostr.position() = byte; ++ostr.position(); x >>= 7; - if (!x) - return; } + + uint8_t final_byte = static_cast(x); + + ostr.nextIfAtEnd(); + *ostr.position() = final_byte; + ++ostr.position(); } inline void writeVarUInt(UInt64 x, std::ostream & ostr) { - if (x > VAR_UINT_MAX) [[unlikely]] - throwValueTooLargeForVarIntEncoding(x); - - for (size_t i = 0; i < 9; ++i) + while (x > 0x7F) { - uint8_t byte = x & 0x7F; - if (x > 0x7F) - byte |= 0x80; - + uint8_t byte = 0x80 | (x & 0x7F); ostr.put(byte); x >>= 7; - if (!x) - return; } + + uint8_t final_byte = static_cast(x); + ostr.put(final_byte); } inline char * writeVarUInt(UInt64 x, char * ostr) { - if (x > VAR_UINT_MAX) [[unlikely]] - throwValueTooLargeForVarIntEncoding(x); - - for (size_t i = 0; i < 9; ++i) + while (x > 0x7F) { - uint8_t byte = x & 0x7F; - if (x > 0x7F) - byte |= 0x80; + uint8_t byte = 0x80 | (x & 0x7F); *ostr = byte; ++ostr; x >>= 7; - if (!x) - return ostr; } + uint8_t final_byte = static_cast(x); + + *ostr = final_byte; + ++ostr; + return ostr; } @@ -101,7 +87,7 @@ template inline void readVarUInt(UInt64 & x, ReadBuffer & istr) { x = 0; - for (size_t i = 0; i < 9; ++i) + for (size_t i = 0; i < 10; ++i) { if constexpr (check_eof) if (istr.eof()) [[unlikely]] @@ -120,7 +106,7 @@ inline void readVarUInt(UInt64 & x, ReadBuffer & istr) inline void readVarUInt(UInt64 & x, ReadBuffer & istr) { - if (istr.buffer().end() - istr.position() >= 9) + if (istr.buffer().end() - istr.position() >= 10) return impl::readVarUInt(x, istr); return impl::readVarUInt(x, istr); } @@ -128,7 +114,7 @@ inline void readVarUInt(UInt64 & x, ReadBuffer & istr) inline void readVarUInt(UInt64 & x, std::istream & istr) { x = 0; - for (size_t i = 0; i < 9; ++i) + for (size_t i = 0; i < 10; ++i) { UInt64 byte = istr.get(); x |= (byte & 0x7F) << (7 * i); @@ -143,7 +129,7 @@ inline const char * readVarUInt(UInt64 & x, const char * istr, size_t size) const char * end = istr + size; x = 0; - for (size_t i = 0; i < 9; ++i) + for (size_t i = 0; i < 10; ++i) { if (istr == end) [[unlikely]] throwReadAfterEOF(); @@ -220,7 +206,8 @@ inline size_t getLengthOfVarUInt(UInt64 x) : (x < (1ULL << 42) ? 6 : (x < (1ULL << 49) ? 7 : (x < (1ULL << 56) ? 8 - : 9))))))); + : (x < (1ULL << 63) ? 
9 + : 10)))))))); } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 4d9fb47c8931..36566832ebc9 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1905,17 +1905,18 @@ void TCPHandler::sendData(const Block & block) { initBlockOutput(block); - auto prev_bytes_written_out = out->count(); - auto prev_bytes_written_compressed_out = state.maybe_compressed_out->count(); + size_t prev_bytes_written_out = out->count(); + size_t prev_bytes_written_compressed_out = state.maybe_compressed_out->count(); try { /// For testing hedged requests if (unknown_packet_in_send_data) { + constexpr UInt64 marker = (1ULL<<63) - 1; --unknown_packet_in_send_data; if (unknown_packet_in_send_data == 0) - writeVarUInt(VAR_UINT_MAX, *out); + writeVarUInt(marker, *out); } writeVarUInt(Protocol::Server::Data, *out); diff --git a/tests/queries/0_stateless/02812_large_varints.reference b/tests/queries/0_stateless/02812_large_varints.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02812_large_varints.sql b/tests/queries/0_stateless/02812_large_varints.sql new file mode 100644 index 000000000000..cfbebb7292e4 --- /dev/null +++ b/tests/queries/0_stateless/02812_large_varints.sql @@ -0,0 +1,4 @@ +-- 64-bit integers with MSB set (i.e. values > (1ULL<<63) - 1) could for historical/compat reasons not be serialized as var-ints (issue #51486). +-- These two queries internally produce such big values, run them to be sure no bad things happen. +SELECT topKWeightedState(65535)(now(), -2) FORMAT Null; +SELECT number FROM numbers(toUInt64(-1)) limit 10 Format Null; From fc19e74ba9084e66a7ff43565ef80a78dda65570 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 7 Jul 2023 01:12:17 +0200 Subject: [PATCH 128/179] fix deadlock on DatabaseCatalog shutdown --- src/Interpreters/DatabaseCatalog.cpp | 11 ++++++++++- src/Interpreters/DatabaseCatalog.h | 2 ++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 4cb2f6e3b3d8..dc1861b3bd87 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -56,6 +56,7 @@ namespace ErrorCodes extern const int DATABASE_ACCESS_DENIED; extern const int LOGICAL_ERROR; extern const int HAVE_DEPENDENT_OBJECTS; + extern const int UNFINISHED; } TemporaryTableHolder::TemporaryTableHolder(ContextPtr context_, const TemporaryTableHolder::Creator & creator, const ASTPtr & query) @@ -196,6 +197,9 @@ void DatabaseCatalog::startupBackgroundCleanup() void DatabaseCatalog::shutdownImpl() { + is_shutting_down = true; + wait_table_finally_dropped.notify_all(); + if (cleanup_task) (*cleanup_task)->deactivate(); @@ -1160,8 +1164,13 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) std::unique_lock lock{tables_marked_dropped_mutex}; wait_table_finally_dropped.wait(lock, [&]() TSA_REQUIRES(tables_marked_dropped_mutex) -> bool { - return !tables_marked_dropped_ids.contains(uuid); + return !tables_marked_dropped_ids.contains(uuid) || is_shutting_down; }); + + /// TSA doesn't support unique_lock + if (TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid)) + throw Exception(ErrorCodes::UNFINISHED, "Did not finish dropping the table with UUID {} because the server is shutting down, " + "will finish after restart", uuid); } void DatabaseCatalog::addDependencies( diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 
258ea2dee7ce..d502505027fa 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -308,6 +308,8 @@ class DatabaseCatalog : boost::noncopyable, WithMutableContext Poco::Logger * log; + std::atomic_bool is_shutting_down = false; + /// Do not allow simultaneous execution of DDL requests on the same table. /// database name -> database guard -> (table name mutex, counter), /// counter: how many threads are running a query on the table at the same time From d30be39224f94618393c9502961632422b6676f5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 7 Jul 2023 03:28:33 +0200 Subject: [PATCH 129/179] Fix flaky test 00175_partition_by_ignore and move it to correct location --- .../00175_partition_by_ignore.reference | 0 .../{1_stateful => 0_stateless}/00175_partition_by_ignore.sql | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename tests/queries/{1_stateful => 0_stateless}/00175_partition_by_ignore.reference (100%) rename tests/queries/{1_stateful => 0_stateless}/00175_partition_by_ignore.sql (90%) diff --git a/tests/queries/1_stateful/00175_partition_by_ignore.reference b/tests/queries/0_stateless/00175_partition_by_ignore.reference similarity index 100% rename from tests/queries/1_stateful/00175_partition_by_ignore.reference rename to tests/queries/0_stateless/00175_partition_by_ignore.reference diff --git a/tests/queries/1_stateful/00175_partition_by_ignore.sql b/tests/queries/0_stateless/00175_partition_by_ignore.sql similarity index 90% rename from tests/queries/1_stateful/00175_partition_by_ignore.sql rename to tests/queries/0_stateless/00175_partition_by_ignore.sql index 737d1b59fe3e..19d63c82a87c 100644 --- a/tests/queries/1_stateful/00175_partition_by_ignore.sql +++ b/tests/queries/0_stateless/00175_partition_by_ignore.sql @@ -2,7 +2,7 @@ SELECT '-- check that partition key with ignore works correctly'; DROP TABLE IF EXISTS partition_by_ignore SYNC; -CREATE TABLE partition_by_ignore (ts DateTime, ts_2 DateTime) ENGINE=MergeTree PARTITION BY (toYYYYMM(ts), ignore(ts_2)) ORDER BY tuple(); +CREATE TABLE partition_by_ignore (ts DateTime, ts_2 DateTime) ENGINE=MergeTree PARTITION BY (toYYYYMM(ts), ignore(ts_2)) ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; INSERT INTO partition_by_ignore SELECT toDateTime('2022-08-03 00:00:00') + toIntervalDay(number), toDateTime('2022-08-04 00:00:00') + toIntervalDay(number) FROM numbers(60); EXPLAIN ESTIMATE SELECT count() FROM partition_by_ignore WHERE ts BETWEEN toDateTime('2022-08-07 00:00:00') AND toDateTime('2022-08-10 00:00:00') FORMAT CSV; From f8ac899c3fefb1268a5197dc4d85c2ee1eb174ca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 7 Jul 2023 03:49:50 +0200 Subject: [PATCH 130/179] Fix flaky test 02360_send_logs_level_colors: avoid usage of `file` tool --- tests/queries/0_stateless/02360_send_logs_level_colors.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02360_send_logs_level_colors.sh b/tests/queries/0_stateless/02360_send_logs_level_colors.sh index 0585e779815b..a9b7d4dd3c1c 100755 --- a/tests/queries/0_stateless/02360_send_logs_level_colors.sh +++ b/tests/queries/0_stateless/02360_send_logs_level_colors.sh @@ -1,11 +1,12 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=trace + # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh file_name="${CLICKHOUSE_TMP}/res_${CLICKHOUSE_DATABASE}.log" -CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=trace/g') # Run query via expect to make isatty() return true function run() @@ -20,8 +21,7 @@ spawn bash -c "$command" expect 1 EOF - file "$file_name" | grep -o "ASCII text" - file "$file_name" | grep -o "with escape sequences" + rg -F $'\x1b' "$file_name" && cat "$file_name" || echo "ASCII text" } run "$CLICKHOUSE_CLIENT -q 'SELECT 1' 2>$file_name" From f0cc90a7fb0dcf75725e0f4e437828cbb4465143 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 7 Jul 2023 04:07:03 +0200 Subject: [PATCH 131/179] Revert "Merge pull request #51822 from kssenii/minor-changes" This reverts commit 5ac85f4fa888b4cca9d433b98505d52777281c6e, reversing changes made to 376c903da9502fb2efce180178d96c14a664f298. --- src/Interpreters/FilesystemCacheLog.h | 11 ++++++++++- src/Interpreters/InterpreterSystemQuery.cpp | 8 ++++---- src/Parsers/ASTSystemQuery.cpp | 8 ++++---- src/Parsers/ASTSystemQuery.h | 4 ++-- src/Parsers/ParserSystemQuery.cpp | 4 ++-- 5 files changed, 22 insertions(+), 13 deletions(-) diff --git a/src/Interpreters/FilesystemCacheLog.h b/src/Interpreters/FilesystemCacheLog.h index 0d088a922e05..d6dd00e5463c 100644 --- a/src/Interpreters/FilesystemCacheLog.h +++ b/src/Interpreters/FilesystemCacheLog.h @@ -11,7 +11,16 @@ namespace DB { - +/// +/// -------- Column --------- Type ------ +/// | event_date | DateTime | +/// | event_time | UInt64 | +/// | query_id | String | +/// | remote_file_path | String | +/// | segment_range | Tuple | +/// | read_type | String | +/// ------------------------------------- +/// struct FilesystemCacheLogElement { enum class CacheType diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 1c2eb66923eb..e1ff8676bc77 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -370,15 +370,15 @@ BlockIO InterpreterSystemQuery::execute() else { auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache; - if (query.key_to_drop.empty()) + if (query.delete_key.empty()) { cache->removeAllReleasable(); } else { - auto key = FileCacheKey::fromKeyString(query.key_to_drop); - if (query.offset_to_drop.has_value()) - cache->removeFileSegment(key, query.offset_to_drop.value()); + auto key = FileCacheKey::fromKeyString(query.delete_key); + if (query.delete_offset.has_value()) + cache->removeFileSegment(key, query.delete_offset.value()); else cache->removeKey(key); } diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index 22244a7075c1..9c5e7bff61eb 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -212,11 +212,11 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, if (!filesystem_cache_name.empty()) { settings.ostr << (settings.hilite ? hilite_none : "") << " " << filesystem_cache_name; - if (!key_to_drop.empty()) + if (!delete_key.empty()) { - settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << key_to_drop; - if (offset_to_drop.has_value()) - settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << offset_to_drop.value(); + settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << delete_key; + if (delete_offset.has_value()) + settings.ostr << (settings.hilite ? 
hilite_none : "") << " OFFSET " << delete_offset.value(); } } } diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 6c81162f103c..ebc3e9cd4308 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -107,8 +107,8 @@ class ASTSystemQuery : public IAST, public ASTQueryWithOnCluster UInt64 seconds{}; String filesystem_cache_name; - std::string key_to_drop; - std::optional offset_to_drop; + std::string delete_key; + std::optional delete_offset; String backup_name; diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index 09c86876b483..ef71e994d564 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -409,9 +409,9 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & res->filesystem_cache_name = ast->as()->value.safeGet(); if (ParserKeyword{"KEY"}.ignore(pos, expected) && ParserIdentifier().parse(pos, ast, expected)) { - res->key_to_drop = ast->as()->name(); + res->delete_key = ast->as()->name(); if (ParserKeyword{"OFFSET"}.ignore(pos, expected) && ParserLiteral().parse(pos, ast, expected)) - res->offset_to_drop = ast->as()->value.safeGet(); + res->delete_offset = ast->as()->value.safeGet(); } } if (!parseQueryWithOnCluster(res, pos, expected)) From 7cece62d26d01621f2cd9e8cc8b6b7a68d808dd9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 7 Jul 2023 04:07:19 +0200 Subject: [PATCH 132/179] Revert "Merge pull request #51547 from kssenii/more-flexible-drop-cache" This reverts commit 2ce7bcaa3d5fb36a11ae0211eabd5a89c2a8c5de, reversing changes made to e897207cd5402307295fb3dcf5c8650d5e0a4668. --- .../IO/CachedOnDiskReadBufferFromFile.cpp | 13 ++-- src/Disks/IO/CachedOnDiskReadBufferFromFile.h | 2 +- src/Disks/IO/ReadBufferFromRemoteFSGather.cpp | 2 - src/Interpreters/Cache/FileCache.cpp | 34 +++++----- src/Interpreters/Cache/FileCache.h | 12 +--- src/Interpreters/Cache/FileCacheKey.cpp | 5 -- src/Interpreters/Cache/FileCacheKey.h | 2 - src/Interpreters/Cache/Metadata.cpp | 26 +------ src/Interpreters/Cache/Metadata.h | 8 +-- src/Interpreters/FilesystemCacheLog.cpp | 4 -- src/Interpreters/FilesystemCacheLog.h | 2 - src/Interpreters/InterpreterSystemQuery.cpp | 13 +--- src/Parsers/ASTSystemQuery.cpp | 8 --- src/Parsers/ASTSystemQuery.h | 2 - src/Parsers/ParserSystemQuery.cpp | 8 --- ...2808_filesystem_cache_drop_query.reference | 4 -- .../02808_filesystem_cache_drop_query.sh | 67 ------------------- 17 files changed, 30 insertions(+), 182 deletions(-) delete mode 100644 tests/queries/0_stateless/02808_filesystem_cache_drop_query.reference delete mode 100755 tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 15b6a9211dec..81aa29639ac5 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -74,22 +74,19 @@ CachedOnDiskReadBufferFromFile::CachedOnDiskReadBufferFromFile( } void CachedOnDiskReadBufferFromFile::appendFilesystemCacheLog( - const FileSegment & file_segment, CachedOnDiskReadBufferFromFile::ReadType type) + const FileSegment::Range & file_segment_range, CachedOnDiskReadBufferFromFile::ReadType type) { if (!cache_log) return; - const auto range = file_segment.range(); FilesystemCacheLogElement elem { .event_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()), .query_id = query_id, .source_file_path = source_file_path, - 
.file_segment_range = { range.left, range.right }, + .file_segment_range = { file_segment_range.left, file_segment_range.right }, .requested_range = { first_offset, read_until_position }, - .file_segment_key = file_segment.key().toString(), - .file_segment_offset = file_segment.offset(), - .file_segment_size = range.size(), + .file_segment_size = file_segment_range.size(), .read_from_cache_attempted = true, .read_buffer_id = current_buffer_id, .profile_counters = std::make_shared( @@ -498,7 +495,7 @@ bool CachedOnDiskReadBufferFromFile::completeFileSegmentAndGetNext() auto completed_range = current_file_segment->range(); if (cache_log) - appendFilesystemCacheLog(*current_file_segment, read_type); + appendFilesystemCacheLog(completed_range, read_type); chassert(file_offset_of_buffer_end > completed_range.right); @@ -521,7 +518,7 @@ CachedOnDiskReadBufferFromFile::~CachedOnDiskReadBufferFromFile() { if (cache_log && file_segments && !file_segments->empty()) { - appendFilesystemCacheLog(file_segments->front(), read_type); + appendFilesystemCacheLog(file_segments->front().range(), read_type); } } diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h index 36cf8a541837..b4e7701de75c 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h @@ -90,7 +90,7 @@ class CachedOnDiskReadBufferFromFile : public ReadBufferFromFileBase bool completeFileSegmentAndGetNext(); - void appendFilesystemCacheLog(const FileSegment & file_segment, ReadType read_type); + void appendFilesystemCacheLog(const FileSegment::Range & file_segment_range, ReadType read_type); bool writeCache(char * data, size_t size, size_t offset, FileSegment & file_segment); diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 2cd90731f1d8..16c1def7b118 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -90,8 +90,6 @@ void ReadBufferFromRemoteFSGather::appendUncachedReadInfo() .source_file_path = current_object.remote_path, .file_segment_range = { 0, current_object.bytes_size }, .cache_type = FilesystemCacheLogElement::CacheType::READ_FROM_FS_BYPASSING_CACHE, - .file_segment_key = {}, - .file_segment_offset = {}, .file_segment_size = current_object.bytes_size, .read_from_cache_attempted = false, }; diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index de8ae33433a6..91d1c63e8326 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -806,13 +806,6 @@ bool FileCache::tryReserve(FileSegment & file_segment, const size_t size) return true; } -void FileCache::removeKey(const Key & key) -{ - assertInitialized(); - auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW); - locked_key->removeAll(); -} - void FileCache::removeKeyIfExists(const Key & key) { assertInitialized(); @@ -825,14 +818,7 @@ void FileCache::removeKeyIfExists(const Key & key) /// But if we have multiple replicated zero-copy tables on the same server /// it became possible to start removing something from cache when it is used /// by other "zero-copy" tables. That is why it's not an error. 
- locked_key->removeAll(/* if_releasable */true); -} - -void FileCache::removeFileSegment(const Key & key, size_t offset) -{ - assertInitialized(); - auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW); - locked_key->removeFileSegment(offset); + locked_key->removeAllReleasable(); } void FileCache::removePathIfExists(const String & path) @@ -844,12 +830,22 @@ void FileCache::removeAllReleasable() { assertInitialized(); - metadata.iterate([](LockedKey & locked_key) { locked_key.removeAll(/* if_releasable */true); }); + auto lock = lockCache(); + + main_priority->iterate([&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) + { + if (segment_metadata->releasable()) + { + auto file_segment = segment_metadata->file_segment; + locked_key.removeFileSegment(file_segment->offset(), file_segment->lock()); + return PriorityIterationResult::REMOVE_AND_CONTINUE; + } + return PriorityIterationResult::CONTINUE; + }, lock); if (stash) { /// Remove all access information. - auto lock = lockCache(); stash->records.clear(); stash->queue->removeAll(lock); } @@ -919,7 +915,7 @@ void FileCache::loadMetadata() continue; } - const auto key = Key::fromKeyString(key_directory.filename().string()); + const auto key = Key(unhexUInt(key_directory.filename().string().data())); auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::CREATE_EMPTY, /* is_initial_load */true); for (fs::directory_iterator offset_it{key_directory}; offset_it != fs::directory_iterator(); ++offset_it) @@ -1074,7 +1070,7 @@ FileSegmentsHolderPtr FileCache::getSnapshot() FileSegmentsHolderPtr FileCache::getSnapshot(const Key & key) { FileSegments file_segments; - auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW_LOGICAL); + auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW); for (const auto & [_, file_segment_metadata] : *locked_key->getKeyMetadata()) file_segments.push_back(FileSegment::getSnapshot(file_segment_metadata->file_segment)); return std::make_unique(std::move(file_segments)); diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 2e6a5094758e..0e3b17baa2f4 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -83,19 +83,13 @@ class FileCache : private boost::noncopyable FileSegmentsHolderPtr set(const Key & key, size_t offset, size_t size, const CreateFileSegmentSettings & settings); - /// Remove file segment by `key` and `offset`. Throws if file segment does not exist. - void removeFileSegment(const Key & key, size_t offset); - - /// Remove files by `key`. Throws if key does not exist. - void removeKey(const Key & key); - - /// Remove files by `key`. + /// Remove files by `key`. Removes files which might be used at the moment. void removeKeyIfExists(const Key & key); - /// Removes files by `path`. + /// Removes files by `path`. Removes files which might be used at the moment. void removePathIfExists(const String & path); - /// Remove files by `key`. + /// Remove files by `key`. Will not remove files which are used at the moment. 
void removeAllReleasable(); std::vector tryGetCachePaths(const Key & key); diff --git a/src/Interpreters/Cache/FileCacheKey.cpp b/src/Interpreters/Cache/FileCacheKey.cpp index 772fcd600bf7..f97cdc058aa1 100644 --- a/src/Interpreters/Cache/FileCacheKey.cpp +++ b/src/Interpreters/Cache/FileCacheKey.cpp @@ -28,9 +28,4 @@ FileCacheKey FileCacheKey::random() return FileCacheKey(UUIDHelpers::generateV4().toUnderType()); } -FileCacheKey FileCacheKey::fromKeyString(const std::string & key_str) -{ - return FileCacheKey(unhexUInt(key_str.data())); -} - } diff --git a/src/Interpreters/Cache/FileCacheKey.h b/src/Interpreters/Cache/FileCacheKey.h index e788cd5e7cdc..bab8359732cd 100644 --- a/src/Interpreters/Cache/FileCacheKey.h +++ b/src/Interpreters/Cache/FileCacheKey.h @@ -21,8 +21,6 @@ struct FileCacheKey static FileCacheKey random(); bool operator==(const FileCacheKey & other) const { return key == other.key; } - - static FileCacheKey fromKeyString(const std::string & key_str); }; using FileCacheKeyAndOffset = std::pair; diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 0a2d58432e42..bfaa00eac2cb 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -25,7 +25,6 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; - extern const int BAD_ARGUMENTS; } FileSegmentMetadata::FileSegmentMetadata(FileSegmentPtr && file_segment_) @@ -192,8 +191,6 @@ LockedKeyPtr CacheMetadata::lockKeyMetadata( if (it == end()) { if (key_not_found_policy == KeyNotFoundPolicy::THROW) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "No such key `{}` in cache", key); - else if (key_not_found_policy == KeyNotFoundPolicy::THROW_LOGICAL) throw Exception(ErrorCodes::LOGICAL_ERROR, "No such key `{}` in cache", key); else if (key_not_found_policy == KeyNotFoundPolicy::RETURN_NULL) return nullptr; @@ -218,8 +215,6 @@ LockedKeyPtr CacheMetadata::lockKeyMetadata( return locked_metadata; if (key_not_found_policy == KeyNotFoundPolicy::THROW) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "No such key `{}` in cache", key); - else if (key_not_found_policy == KeyNotFoundPolicy::THROW_LOGICAL) throw Exception(ErrorCodes::LOGICAL_ERROR, "No such key `{}` in cache", key); if (key_not_found_policy == KeyNotFoundPolicy::RETURN_NULL) @@ -563,11 +558,11 @@ bool LockedKey::isLastOwnerOfFileSegment(size_t offset) const return file_segment_metadata->file_segment.use_count() == 2; } -void LockedKey::removeAll(bool if_releasable) +void LockedKey::removeAllReleasable() { for (auto it = key_metadata->begin(); it != key_metadata->end();) { - if (if_releasable && !it->second->releasable()) + if (!it->second->releasable()) { ++it; continue; @@ -588,32 +583,17 @@ void LockedKey::removeAll(bool if_releasable) } } -KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset) -{ - auto it = key_metadata->find(offset); - if (it == key_metadata->end()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no offset {}", offset); - - auto file_segment = it->second->file_segment; - return removeFileSegmentImpl(it, file_segment->lock()); -} - KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, const FileSegmentGuard::Lock & segment_lock) { auto it = key_metadata->find(offset); if (it == key_metadata->end()) throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no offset {}", offset); - return removeFileSegmentImpl(it, segment_lock); -} - -KeyMetadata::iterator LockedKey::removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock & 
segment_lock) -{ auto file_segment = it->second->file_segment; LOG_DEBUG( key_metadata->log, "Remove from cache. Key: {}, offset: {}, size: {}", - getKey(), file_segment->offset(), file_segment->reserved_size); + getKey(), offset, file_segment->reserved_size); chassert(file_segment->assertCorrectnessUnlocked(segment_lock)); diff --git a/src/Interpreters/Cache/Metadata.h b/src/Interpreters/Cache/Metadata.h index 42d74338e127..503c19f4150d 100644 --- a/src/Interpreters/Cache/Metadata.h +++ b/src/Interpreters/Cache/Metadata.h @@ -87,7 +87,7 @@ struct CacheMetadata : public std::unordered_map, { public: using Key = FileCacheKey; - using IterateCacheMetadataFunc = std::function; + using IterateCacheMetadataFunc = std::function; explicit CacheMetadata(const std::string & path_); @@ -106,7 +106,6 @@ struct CacheMetadata : public std::unordered_map, enum class KeyNotFoundPolicy { THROW, - THROW_LOGICAL, CREATE_EMPTY, RETURN_NULL, }; @@ -170,10 +169,9 @@ struct LockedKey : private boost::noncopyable std::shared_ptr getKeyMetadata() const { return key_metadata; } std::shared_ptr getKeyMetadata() { return key_metadata; } - void removeAll(bool if_releasable = true); + void removeAllReleasable(); KeyMetadata::iterator removeFileSegment(size_t offset, const FileSegmentGuard::Lock &); - KeyMetadata::iterator removeFileSegment(size_t offset); void shrinkFileSegmentToDownloadedSize(size_t offset, const FileSegmentGuard::Lock &); @@ -190,8 +188,6 @@ struct LockedKey : private boost::noncopyable std::string toString() const; private: - KeyMetadata::iterator removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock &); - const std::shared_ptr key_metadata; KeyGuard::Lock lock; /// `lock` must be destructed before `key_metadata`. }; diff --git a/src/Interpreters/FilesystemCacheLog.cpp b/src/Interpreters/FilesystemCacheLog.cpp index b660db064d12..17f0fda71ec9 100644 --- a/src/Interpreters/FilesystemCacheLog.cpp +++ b/src/Interpreters/FilesystemCacheLog.cpp @@ -40,8 +40,6 @@ NamesAndTypesList FilesystemCacheLogElement::getNamesAndTypes() {"source_file_path", std::make_shared()}, {"file_segment_range", std::make_shared(types)}, {"total_requested_range", std::make_shared(types)}, - {"key", std::make_shared()}, - {"offset", std::make_shared()}, {"size", std::make_shared()}, {"read_type", std::make_shared()}, {"read_from_cache_attempted", std::make_shared()}, @@ -62,8 +60,6 @@ void FilesystemCacheLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(source_file_path); columns[i++]->insert(Tuple{file_segment_range.first, file_segment_range.second}); columns[i++]->insert(Tuple{requested_range.first, requested_range.second}); - columns[i++]->insert(file_segment_key); - columns[i++]->insert(file_segment_offset); columns[i++]->insert(file_segment_size); columns[i++]->insert(typeToString(cache_type)); columns[i++]->insert(read_from_cache_attempted); diff --git a/src/Interpreters/FilesystemCacheLog.h b/src/Interpreters/FilesystemCacheLog.h index d6dd00e5463c..1b22d561c511 100644 --- a/src/Interpreters/FilesystemCacheLog.h +++ b/src/Interpreters/FilesystemCacheLog.h @@ -39,8 +39,6 @@ struct FilesystemCacheLogElement std::pair file_segment_range{}; std::pair requested_range{}; CacheType cache_type{}; - std::string file_segment_key; - size_t file_segment_offset; size_t file_segment_size; bool read_from_cache_attempted; String read_buffer_id; diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index e1ff8676bc77..f2d011b12d10 100644 
--- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -370,18 +370,7 @@ BlockIO InterpreterSystemQuery::execute() else { auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache; - if (query.delete_key.empty()) - { - cache->removeAllReleasable(); - } - else - { - auto key = FileCacheKey::fromKeyString(query.delete_key); - if (query.delete_offset.has_value()) - cache->removeFileSegment(key, query.delete_offset.value()); - else - cache->removeKey(key); - } + cache->removeAllReleasable(); } break; } diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index 9c5e7bff61eb..a91449ff0357 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -210,15 +210,7 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, else if (type == Type::DROP_FILESYSTEM_CACHE) { if (!filesystem_cache_name.empty()) - { settings.ostr << (settings.hilite ? hilite_none : "") << " " << filesystem_cache_name; - if (!delete_key.empty()) - { - settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << delete_key; - if (delete_offset.has_value()) - settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << delete_offset.value(); - } - } } else if (type == Type::UNFREEZE) { diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index ebc3e9cd4308..ca4802d9a9b4 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -107,8 +107,6 @@ class ASTSystemQuery : public IAST, public ASTQueryWithOnCluster UInt64 seconds{}; String filesystem_cache_name; - std::string delete_key; - std::optional delete_offset; String backup_name; diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index ef71e994d564..48dbe60e2414 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -405,15 +405,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & ParserLiteral path_parser; ASTPtr ast; if (path_parser.parse(pos, ast, expected)) - { res->filesystem_cache_name = ast->as()->value.safeGet(); - if (ParserKeyword{"KEY"}.ignore(pos, expected) && ParserIdentifier().parse(pos, ast, expected)) - { - res->delete_key = ast->as()->name(); - if (ParserKeyword{"OFFSET"}.ignore(pos, expected) && ParserLiteral().parse(pos, ast, expected)) - res->delete_offset = ast->as()->value.safeGet(); - } - } if (!parseQueryWithOnCluster(res, pos, expected)) return false; break; diff --git a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.reference b/tests/queries/0_stateless/02808_filesystem_cache_drop_query.reference deleted file mode 100644 index d80fc78e03dc..000000000000 --- a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.reference +++ /dev/null @@ -1,4 +0,0 @@ -1 -0 -1 -0 diff --git a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh b/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh deleted file mode 100755 index 9d987d0ebf2d..000000000000 --- a/tests/queries/0_stateless/02808_filesystem_cache_drop_query.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, no-parallel, no-s3-storage, no-random-settings - -# set -x - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - - -disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" -$CLICKHOUSE_CLIENT -nm --query """ -DROP TABLE IF EXISTS test; -CREATE TABLE test (a Int32, b String) -ENGINE = MergeTree() ORDER BY tuple() -SETTINGS disk = disk_$disk_name(type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk); - -INSERT INTO test SELECT 1, 'test'; -""" - -query_id=$RANDOM - -$CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1" - -${CLICKHOUSE_CLIENT} -q " system flush logs" - -key=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; -""") - -offset=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT offset FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; -""") - -$CLICKHOUSE_CLIENT -nm --query """ -SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; -""" - -$CLICKHOUSE_CLIENT -nm --query """ -SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key OFFSET $offset; -""" - -$CLICKHOUSE_CLIENT -nm --query """ -SELECT count() FROM system.filesystem_cache WHERE key = '$key' AND file_segment_range_begin = $offset; -""" - -query_id=$RANDOM$RANDOM - -$CLICKHOUSE_CLIENT --query_id "$query_id" --query "SELECT * FROM test FORMAT Null SETTINGS enable_filesystem_cache_log = 1" - -${CLICKHOUSE_CLIENT} -q " system flush logs" - -key=$($CLICKHOUSE_CLIENT -nm --query """ -SELECT key FROM system.filesystem_cache_log WHERE query_id = '$query_id' ORDER BY size DESC LIMIT 1; -""") - -$CLICKHOUSE_CLIENT -nm --query """ -SELECT count() FROM system.filesystem_cache WHERE key = '$key'; -""" - -$CLICKHOUSE_CLIENT -nm --query """ -SYSTEM DROP FILESYSTEM CACHE '$disk_name' KEY $key -""" - -$CLICKHOUSE_CLIENT -nm --query """ -SELECT count() FROM system.filesystem_cache WHERE key = '$key'; -""" From d0ad416e352f39e20b034c5ee1b51cb9efdc6aec Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 7 Jul 2023 04:41:47 +0200 Subject: [PATCH 133/179] Fix flaky test detach_attach_partition_race --- .../0_stateless/01164_detach_attach_partition_race.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01164_detach_attach_partition_race.sh b/tests/queries/0_stateless/01164_detach_attach_partition_race.sh index 7640b9dddf26..3aec4c3445d9 100755 --- a/tests/queries/0_stateless/01164_detach_attach_partition_race.sh +++ b/tests/queries/0_stateless/01164_detach_attach_partition_race.sh @@ -2,9 +2,12 @@ # Tags: race CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +$CLICKHOUSE_CLIENT -q "drop table if exists mt" + $CLICKHOUSE_CLIENT -q "create table mt (n int) engine=MergeTree order by n settings parts_to_throw_insert=1000" $CLICKHOUSE_CLIENT -q "insert into mt values (1)" $CLICKHOUSE_CLIENT -q "insert into mt values (2)" @@ -13,7 +16,9 @@ $CLICKHOUSE_CLIENT -q "insert into mt values (3)" function thread_insert() { while true; do - $CLICKHOUSE_CLIENT -q "insert into mt values (rand())"; + # It might be the case that the threads are terminated and exited, but some children didn't and they are still sending queries when we are dropping tables. + # That's why the "Table doesn't exist" error is allowed, while other errors don't. 
+ $CLICKHOUSE_CLIENT -q "insert into mt values (rand())" 2>&1 | tr -d '\n' | rg -v "Table .+ doesn't exist"; done } From 2246e86159824f9e658ca28ecb796295a1b8585c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 7 Jul 2023 05:30:32 +0200 Subject: [PATCH 134/179] Fix error in subquery operators --- .../AggregateFunctionMinMaxAny.h | 48 +++++++++++-------- .../02812_subquery_operators.reference | 6 +++ .../0_stateless/02812_subquery_operators.sql | 6 +++ 3 files changed, 41 insertions(+), 19 deletions(-) create mode 100644 tests/queries/0_stateless/02812_subquery_operators.reference create mode 100644 tests/queries/0_stateless/02812_subquery_operators.sql diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 5312df32459f..6bfa6895a5cb 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -51,7 +51,8 @@ struct SingleValueDataFixed T value = T{}; public: - static constexpr bool is_nullable = false; + static constexpr bool result_is_nullable = false; + static constexpr bool should_skip_null_arguments = true; static constexpr bool is_any = false; bool has() const @@ -501,7 +502,8 @@ struct SingleValueDataString char small_data[MAX_SMALL_STRING_SIZE]; /// Including the terminating zero. public: - static constexpr bool is_nullable = false; + static constexpr bool result_is_nullable = false; + static constexpr bool should_skip_null_arguments = true; static constexpr bool is_any = false; bool has() const @@ -769,7 +771,7 @@ static_assert( /// For any other value types. -template +template struct SingleValueDataGeneric { private: @@ -779,12 +781,13 @@ struct SingleValueDataGeneric bool has_value = false; public: - static constexpr bool is_nullable = IS_NULLABLE; + static constexpr bool result_is_nullable = RESULT_IS_NULLABLE; + static constexpr bool should_skip_null_arguments = !RESULT_IS_NULLABLE; static constexpr bool is_any = false; bool has() const { - if constexpr (is_nullable) + if constexpr (result_is_nullable) return has_value; return !value.isNull(); } @@ -820,14 +823,14 @@ struct SingleValueDataGeneric void change(const IColumn & column, size_t row_num, Arena *) { column.get(row_num, value); - if constexpr (is_nullable) + if constexpr (result_is_nullable) has_value = true; } void change(const Self & to, Arena *) { value = to.value; - if constexpr (is_nullable) + if constexpr (result_is_nullable) has_value = true; } @@ -844,7 +847,7 @@ struct SingleValueDataGeneric bool changeFirstTime(const Self & to, Arena * arena) { - if (!has() && (is_nullable || to.has())) + if (!has() && (result_is_nullable || to.has())) { change(to, arena); return true; @@ -879,7 +882,7 @@ struct SingleValueDataGeneric } else { - if constexpr (is_nullable) + if constexpr (result_is_nullable) { Field new_value; column.get(row_num, new_value); @@ -910,7 +913,7 @@ struct SingleValueDataGeneric { if (!to.has()) return false; - if constexpr (is_nullable) + if constexpr (result_is_nullable) { if (!has()) { @@ -945,7 +948,7 @@ struct SingleValueDataGeneric } else { - if constexpr (is_nullable) + if constexpr (result_is_nullable) { Field new_value; column.get(row_num, new_value); @@ -975,7 +978,7 @@ struct SingleValueDataGeneric { if (!to.has()) return false; - if constexpr (is_nullable) + if constexpr (result_is_nullable) { if (!value.isNull() && (to.value.isNull() || value < to.value)) { @@ -1138,13 +1141,20 @@ struct AggregateFunctionAnyLastData : Data #endif }; + +/** The 
aggregate function 'singleValueOrNull' is used to implement subquery operators, + * such as x = ALL (SELECT ...) + * It checks if there is only one unique non-NULL value in the data. + * If there is only one unique value - returns it. + * If there are zero or at least two distinct values - returns NULL. + */ template struct AggregateFunctionSingleValueOrNullData : Data { - static constexpr bool is_nullable = true; - using Self = AggregateFunctionSingleValueOrNullData; + static constexpr bool result_is_nullable = true; + bool first_value = true; bool is_null = false; @@ -1166,7 +1176,7 @@ struct AggregateFunctionSingleValueOrNullData : Data if (!to.has()) return; - if (first_value) + if (first_value && !to.first_value) { first_value = false; this->change(to, arena); @@ -1311,7 +1321,7 @@ class AggregateFunctionsSingleValue final : public IAggregateFunctionDataHelper< static DataTypePtr createResultType(const DataTypePtr & type_) { - if constexpr (Data::is_nullable) + if constexpr (Data::result_is_nullable) return makeNullable(type_); return type_; } @@ -1431,13 +1441,13 @@ class AggregateFunctionsSingleValue final : public IAggregateFunctionDataHelper< } AggregateFunctionPtr getOwnNullAdapter( - const AggregateFunctionPtr & nested_function, + const AggregateFunctionPtr & original_function, const DataTypes & /*arguments*/, const Array & /*params*/, const AggregateFunctionProperties & /*properties*/) const override { - if (Data::is_nullable) - return nested_function; + if (Data::result_is_nullable && !Data::should_skip_null_arguments) + return original_function; return nullptr; } diff --git a/tests/queries/0_stateless/02812_subquery_operators.reference b/tests/queries/0_stateless/02812_subquery_operators.reference new file mode 100644 index 000000000000..aed0a046f996 --- /dev/null +++ b/tests/queries/0_stateless/02812_subquery_operators.reference @@ -0,0 +1,6 @@ + +Hello +Hello +123 +1 + ['\0'] [] \0 [''] diff --git a/tests/queries/0_stateless/02812_subquery_operators.sql b/tests/queries/0_stateless/02812_subquery_operators.sql new file mode 100644 index 000000000000..b0638b43e899 --- /dev/null +++ b/tests/queries/0_stateless/02812_subquery_operators.sql @@ -0,0 +1,6 @@ +SELECT singleValueOrNull(toNullable('')); +SELECT singleValueOrNull(toNullable('Hello')); +SELECT singleValueOrNull((SELECT 'Hello')); +SELECT singleValueOrNull(toNullable(123)); +SELECT '' = ALL (SELECT toNullable('')); +SELECT '', ['\0'], [], singleValueOrNull(( SELECT '\0' ) ), ['']; From 4c44c1f6ea422356bbed589aa5053fcd08139cb6 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 7 Jul 2023 06:32:42 +0000 Subject: [PATCH 135/179] Wait inside the function --- tests/queries/0_stateless/02481_async_insert_race_long.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02481_async_insert_race_long.sh b/tests/queries/0_stateless/02481_async_insert_race_long.sh index c4b026c6abae..d8153967e9a8 100755 --- a/tests/queries/0_stateless/02481_async_insert_race_long.sh +++ b/tests/queries/0_stateless/02481_async_insert_race_long.sh @@ -32,6 +32,8 @@ function insert3() ${MY_CLICKHOUSE_CLIENT} --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" & sleep 0.05 done + + wait } function select1() From 23bd23802fc160a34e09db83c87fda53ef645e19 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 7 Jul 2023 12:26:15 +0300 Subject: [PATCH 136/179] CacheDictionary request only unique keys from source --- src/Dictionaries/CacheDictionary.cpp | 13 +++++++------ 1 file changed, 7 
insertions(+), 6 deletions(-) diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index c5c88a9f142a..e27e25ea7c4b 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -552,13 +552,14 @@ void CacheDictionary::update(CacheDictionaryUpdateUnitPtr Date: Fri, 7 Jul 2023 13:32:39 +0300 Subject: [PATCH 137/179] Update 02360_send_logs_level_colors.sh --- tests/queries/0_stateless/02360_send_logs_level_colors.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02360_send_logs_level_colors.sh b/tests/queries/0_stateless/02360_send_logs_level_colors.sh index a9b7d4dd3c1c..127c94c88e2a 100755 --- a/tests/queries/0_stateless/02360_send_logs_level_colors.sh +++ b/tests/queries/0_stateless/02360_send_logs_level_colors.sh @@ -21,7 +21,7 @@ spawn bash -c "$command" expect 1 EOF - rg -F $'\x1b' "$file_name" && cat "$file_name" || echo "ASCII text" + grep -F $'\x1b' "$file_name" && cat "$file_name" || echo "ASCII text" } run "$CLICKHOUSE_CLIENT -q 'SELECT 1' 2>$file_name" From 602392bb6206590e0d24df05eabf69a970767756 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Fri, 7 Jul 2023 12:37:16 +0200 Subject: [PATCH 138/179] Print short fault info only from safe fields --- src/Daemon/BaseDaemon.cpp | 81 ++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 30 deletions(-) diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 6d29523a3548..f766880bd34a 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -310,21 +310,9 @@ class SignalListener : public Poco::Runnable { ThreadStatus thread_status; - String query_id; - String query; - - /// Send logs from this thread to client if possible. - /// It will allow client to see failure messages directly. - if (thread_ptr) - { - query_id = thread_ptr->getQueryId(); - query = thread_ptr->getQueryForLog(); - - if (auto logs_queue = thread_ptr->getInternalTextLogsQueue()) - { - CurrentThread::attachInternalTextLogsQueue(logs_queue, LogsLevel::trace); - } - } + /// First log those fields that are safe to access and that should not cause new fault. + /// That way we will have some duplicated info in the log but we don't loose important info + /// in case of double fault. 
std::string signal_description = "Unknown signal"; @@ -336,21 +324,6 @@ class SignalListener : public Poco::Runnable else if (sig >= 0) signal_description = strsignal(sig); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context - LOG_FATAL(log, "########################################"); - - if (query_id.empty()) - { - LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (no query) Received signal {} ({})", - VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, - thread_num, signal_description, sig); - } - else - { - LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})", - VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, - thread_num, query_id, query, signal_description, sig); - } - String error_message; if (sig != SanitizerTrap) @@ -358,8 +331,15 @@ class SignalListener : public Poco::Runnable else error_message = "Sanitizer trap."; + LOG_FATAL(log, "########## Short fault info ############"); + + LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) Received signal {} ({})", + VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, + thread_num, signal_description, sig); + LOG_FATAL(log, fmt::runtime(error_message)); + String bare_stacktrace_str; if (stack_trace.getSize()) { /// Write bare stack trace (addresses) just in case if we will fail to print symbolized stack trace. @@ -375,6 +355,47 @@ class SignalListener : public Poco::Runnable } LOG_FATAL(log, fmt::runtime(bare_stacktrace.str())); + bare_stacktrace_str = bare_stacktrace.str(); + } + + /// Now try to access potentially unsafe data in thread_ptr. + + String query_id; + String query; + + /// Send logs from this thread to client if possible. + /// It will allow client to see failure messages directly. + if (thread_ptr) + { + query_id = thread_ptr->getQueryId(); + query = thread_ptr->getQueryForLog(); + + if (auto logs_queue = thread_ptr->getInternalTextLogsQueue()) + { + CurrentThread::attachInternalTextLogsQueue(logs_queue, LogsLevel::trace); + } + } + + LOG_FATAL(log, "########################################"); + + if (query_id.empty()) + { + LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (no query) Received signal {} ({})", + VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, + thread_num, signal_description, sig); + } + else + { + LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})", + VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, + thread_num, query_id, query, signal_description, sig); + } + + LOG_FATAL(log, fmt::runtime(error_message)); + + if (!bare_stacktrace_str.empty()) + { + LOG_FATAL(log, fmt::runtime(bare_stacktrace_str)); } /// Write symbolized stack trace line by line for better grep-ability. 
From 50bda59a0d226b108ab1521ae6499d35bab01ad0 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Fri, 7 Jul 2023 11:05:42 +0000 Subject: [PATCH 139/179] Fix typo --- .../test_s3_zero_copy_replication/test.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py index bc13c127610b..2a4e0eece088 100644 --- a/tests/integration/test_s3_zero_copy_replication/test.py +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -48,7 +48,7 @@ def get_large_objects_count(cluster, size=100, folder="data"): return counter -def check_objects_exisis(cluster, object_list, folder="data"): +def check_objects_exist(cluster, object_list, folder="data"): minio = cluster.minio_client for obj in object_list: if obj: @@ -466,7 +466,7 @@ def s3_zero_copy_unfreeze_base(cluster, unfreeze_query_template): assert objects01 == objects02 - check_objects_exisis(cluster, objects01) + check_objects_exist(cluster, objects01) node1.query("TRUNCATE TABLE unfreeze_test") node2.query("SYSTEM SYNC REPLICA unfreeze_test", timeout=30) @@ -477,12 +477,12 @@ def s3_zero_copy_unfreeze_base(cluster, unfreeze_query_template): assert objects01 == objects11 assert objects01 == objects12 - check_objects_exisis(cluster, objects11) + check_objects_exist(cluster, objects11) node1.query(f"{unfreeze_query_template} 'freeze_backup1'") wait_mutations(node1, "unfreeze_test", 10) - check_objects_exisis(cluster, objects12) + check_objects_exist(cluster, objects12) node2.query(f"{unfreeze_query_template} 'freeze_backup2'") wait_mutations(node2, "unfreeze_test", 10) @@ -540,8 +540,8 @@ def s3_zero_copy_drop_detached(cluster, unfreeze_query_template): wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) - check_objects_exisis(cluster, objects1) - check_objects_exisis(cluster, objects2) + check_objects_exist(cluster, objects1) + check_objects_exist(cluster, objects2) node2.query( "ALTER TABLE drop_detached_test DROP DETACHED PARTITION '1'", @@ -551,8 +551,8 @@ def s3_zero_copy_drop_detached(cluster, unfreeze_query_template): wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) - check_objects_exisis(cluster, objects1) - check_objects_exisis(cluster, objects2) + check_objects_exist(cluster, objects1) + check_objects_exist(cluster, objects2) node1.query( "ALTER TABLE drop_detached_test DROP DETACHED PARTITION '1'", @@ -562,7 +562,7 @@ def s3_zero_copy_drop_detached(cluster, unfreeze_query_template): wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) - check_objects_exisis(cluster, objects1) + check_objects_exist(cluster, objects1) check_objects_not_exisis(cluster, objects_diff) node1.query( @@ -573,7 +573,7 @@ def s3_zero_copy_drop_detached(cluster, unfreeze_query_template): wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) - check_objects_exisis(cluster, objects1) + check_objects_exist(cluster, objects1) node2.query( "ALTER TABLE drop_detached_test DROP DETACHED PARTITION '0'", @@ -682,7 +682,7 @@ def test_s3_zero_copy_keeps_data_after_mutation(started_cluster): wait_for_active_parts(node2, 4, "zero_copy_mutation") objects1 = node1.get_table_objects("zero_copy_mutation") - check_objects_exisis(cluster, objects1) + check_objects_exist(cluster, objects1) node1.query( """ @@ -710,7 +710,7 
@@ def test_s3_zero_copy_keeps_data_after_mutation(started_cluster): nodeY = node2 objectsY = nodeY.get_table_objects("zero_copy_mutation") - check_objects_exisis(cluster, objectsY) + check_objects_exist(cluster, objectsY) nodeX.query( """ @@ -745,7 +745,7 @@ def test_s3_zero_copy_keeps_data_after_mutation(started_cluster): """ ) - check_objects_exisis(cluster, objectsY) + check_objects_exist(cluster, objectsY) nodeY.query( """ From d439db31397e8576a6e49e209bf069612ef9d2f5 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Fri, 7 Jul 2023 13:10:55 +0200 Subject: [PATCH 140/179] Print just signal number first, and only then get its description --- src/Daemon/BaseDaemon.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index f766880bd34a..422f6ffb63fe 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -314,6 +314,11 @@ class SignalListener : public Poco::Runnable /// That way we will have some duplicated info in the log but we don't loose important info /// in case of double fault. + LOG_FATAL(log, "########## Short fault info ############"); + LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) Received signal {}", + VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, + thread_num, sig); + std::string signal_description = "Unknown signal"; /// Some of these are not really signals, but our own indications on failure reason. @@ -324,6 +329,8 @@ class SignalListener : public Poco::Runnable else if (sig >= 0) signal_description = strsignal(sig); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context + LOG_FATAL(log, "Signal description: {}", signal_description); + String error_message; if (sig != SanitizerTrap) @@ -331,12 +338,6 @@ class SignalListener : public Poco::Runnable else error_message = "Sanitizer trap."; - LOG_FATAL(log, "########## Short fault info ############"); - - LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) Received signal {} ({})", - VERSION_STRING, VERSION_OFFICIAL, daemon.build_id, daemon.git_hash, - thread_num, signal_description, sig); - LOG_FATAL(log, fmt::runtime(error_message)); String bare_stacktrace_str; From 05649c7b384cb412fa9e25150413460cc969893e Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Fri, 7 Jul 2023 13:22:52 +0200 Subject: [PATCH 141/179] Removed duplicate header --- src/Storages/System/attachSystemTables.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index a9873c821ce0..84965b3196b9 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -1,4 +1,3 @@ -#include "Storages/System/StorageSystemJemalloc.h" #include "config.h" #include From 6d798e0bde13416488409718fd2db6191dde1197 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 7 Jul 2023 10:16:36 +0000 Subject: [PATCH 142/179] Better check for current_thread --- src/Common/ThreadStatus.cpp | 9 +++++---- src/Common/ThreadStatus.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index 7a602afe7e73..b39ea7e8ea8b 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -199,13 +199,14 @@ ThreadStatus::~ThreadStatus() if (deleter) deleter(); + 
chassert(!check_current_thread_on_destruction || current_thread == this); + /// Only change current_thread if it's currently being used by this ThreadStatus /// For example, PushingToViews chain creates and deletes ThreadStatus instances while running in the main query thread - if (check_current_thread_on_destruction) - { - assert(current_thread == this); + if (current_thread == this) current_thread = nullptr; - } + else if (check_current_thread_on_destruction) + LOG_ERROR(log, "current_thread contains invalid address"); } void ThreadStatus::updatePerformanceCounters() diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 7c8dbdb68bdf..aa1e3eea6e50 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -224,7 +224,7 @@ class ThreadStatus : public boost::noncopyable Poco::Logger * log = nullptr; - bool check_current_thread_on_destruction; + [[maybe_unused]] bool check_current_thread_on_destruction; public: explicit ThreadStatus(bool check_current_thread_on_destruction_ = true); From d9d0e9062a4f30775b1b0d32121fef3da1ea33bf Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 7 Jul 2023 11:41:01 +0000 Subject: [PATCH 143/179] Remove maybe_unused --- src/Common/ThreadStatus.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index aa1e3eea6e50..7c8dbdb68bdf 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -224,7 +224,7 @@ class ThreadStatus : public boost::noncopyable Poco::Logger * log = nullptr; - [[maybe_unused]] bool check_current_thread_on_destruction; + bool check_current_thread_on_destruction; public: explicit ThreadStatus(bool check_current_thread_on_destruction_ = true); From 36e52efc3e7602e43628246562b2db70ca85e765 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 7 Jul 2023 11:57:12 +0000 Subject: [PATCH 144/179] Remove timeout --- .../01164_detach_attach_partition_race.sh | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/01164_detach_attach_partition_race.sh b/tests/queries/0_stateless/01164_detach_attach_partition_race.sh index 3aec4c3445d9..e645cb5aae7b 100755 --- a/tests/queries/0_stateless/01164_detach_attach_partition_race.sh +++ b/tests/queries/0_stateless/01164_detach_attach_partition_race.sh @@ -2,7 +2,6 @@ # Tags: race CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh @@ -15,16 +14,16 @@ $CLICKHOUSE_CLIENT -q "insert into mt values (3)" function thread_insert() { - while true; do - # It might be the case that the threads are terminated and exited, but some children didn't and they are still sending queries when we are dropping tables. - # That's why the "Table doesn't exist" error is allowed, while other errors don't. 
- $CLICKHOUSE_CLIENT -q "insert into mt values (rand())" 2>&1 | tr -d '\n' | rg -v "Table .+ doesn't exist"; + local TIMELIMIT=$((SECONDS+$1)) + while [ $SECONDS -lt "$TIMELIMIT" ]; do + $CLICKHOUSE_CLIENT -q "insert into mt values (rand())"; done } function thread_detach_attach() { - while true; do + local TIMELIMIT=$((SECONDS+$1)) + while [ $SECONDS -lt "$TIMELIMIT" ]; do $CLICKHOUSE_CLIENT -q "alter table mt detach partition id 'all'"; $CLICKHOUSE_CLIENT -q "alter table mt attach partition id 'all'"; done @@ -32,7 +31,8 @@ function thread_detach_attach() function thread_drop_detached() { - while true; do + local TIMELIMIT=$((SECONDS+$1)) + while [ $SECONDS -lt "$TIMELIMIT" ]; do $CLICKHOUSE_CLIENT --allow_drop_detached 1 -q "alter table mt drop detached partition id 'all'"; done } @@ -43,10 +43,10 @@ export -f thread_drop_detached; TIMEOUT=10 -timeout $TIMEOUT bash -c thread_insert & -timeout $TIMEOUT bash -c thread_detach_attach 2> /dev/null & -timeout $TIMEOUT bash -c thread_detach_attach 2> /dev/null & -timeout $TIMEOUT bash -c thread_drop_detached 2> /dev/null & +thread_insert $TIMEOUT & +thread_detach_attach $TIMEOUT 2> /dev/null & +thread_detach_attach $TIMEOUT 2> /dev/null & +thread_drop_detached $TIMEOUT 2> /dev/null & wait From 1e0d97c282b1415aed77dd7198ab244a84c7aea9 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 7 Jul 2023 12:19:10 +0000 Subject: [PATCH 145/179] Do not remove inputs after ActionsDAG::merge --- src/Interpreters/ActionsDAG.cpp | 2 +- .../0_stateless/02812_bug_with_unused_join_columns.reference | 0 .../queries/0_stateless/02812_bug_with_unused_join_columns.sql | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02812_bug_with_unused_join_columns.reference create mode 100644 tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 906875dd314d..46c14c503e4d 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1366,7 +1366,7 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) first.mergeInplace(std::move(second)); /// Drop unused inputs and, probably, some actions. - first.removeUnusedActions(); + first.removeUnusedActions(false); return std::make_shared(std::move(first)); } diff --git a/tests/queries/0_stateless/02812_bug_with_unused_join_columns.reference b/tests/queries/0_stateless/02812_bug_with_unused_join_columns.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql b/tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql new file mode 100644 index 000000000000..6c801b5b73ec --- /dev/null +++ b/tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql @@ -0,0 +1 @@ +SELECT concat(func.name, comb.name) AS x FROM system.functions AS func JOIN system.aggregate_function_combinators AS comb using name WHERE is_aggregate settings allow_experimental_analyzer=1; From fa7fe5277c99c036ff488997aab46b36c6901610 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 7 Jul 2023 12:25:13 +0000 Subject: [PATCH 146/179] Better comment. 
--- src/Interpreters/ActionsDAG.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 46c14c503e4d..2f9fc7e5746c 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1365,7 +1365,7 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) { first.mergeInplace(std::move(second)); - /// Drop unused inputs and, probably, some actions. + /// Some actions could become unused. Do not drop inputs to preserve the header. first.removeUnusedActions(false); return std::make_shared(std::move(first)); From ee33000fc24367166ebf56772b0be4ca0ee25192 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 7 Jul 2023 14:08:54 +0000 Subject: [PATCH 147/179] Fixing tests. --- src/Interpreters/ActionsDAG.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 2f9fc7e5746c..e68e2580231b 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -465,8 +465,12 @@ void ActionsDAG::removeUnusedActions(const Names & required_names, bool allow_re void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_constant_folding) { std::unordered_set visited_nodes; + std::unordered_set used_inputs; std::stack stack; + for (const auto * input : inputs) + used_inputs.insert(input); + for (const auto * node : outputs) { visited_nodes.insert(node); @@ -484,7 +488,7 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta stack.push(&node); } - if (node.type == ActionType::INPUT && !allow_remove_inputs) + if (node.type == ActionType::INPUT && !allow_remove_inputs && used_inputs.contains(&node)) visited_nodes.insert(&node); } From e08f140d62988cd0340ec75f441891a2c01539c3 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 7 Jul 2023 17:32:10 +0300 Subject: [PATCH 148/179] Update 02254_projection_broken_part.sh --- tests/queries/0_stateless/02254_projection_broken_part.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02254_projection_broken_part.sh b/tests/queries/0_stateless/02254_projection_broken_part.sh index 6ba5093f234e..3521d1d9d16e 100755 --- a/tests/queries/0_stateless/02254_projection_broken_part.sh +++ b/tests/queries/0_stateless/02254_projection_broken_part.sh @@ -26,7 +26,7 @@ path=$($CLICKHOUSE_CLIENT -q "select path from system.parts where database='$CLI $CLICKHOUSE_CLIENT -q "select throwIf(substring('$path', 1, 1) != '/', 'Path is relative: $path')" || exit rm -f "$path/ab.proj/data.bin" -$CLICKHOUSE_CLIENT -q "select 3, sum(b) from projection_broken_parts_1 group by a;" 2>/dev/null +$CLICKHOUSE_CLIENT -q "select 3, sum(b) from projection_broken_parts_1 group by a format Null;" 2>/dev/null num_tries=0 while ! 
$CLICKHOUSE_CLIENT -q "select 4, sum(b) from projection_broken_parts_1 group by a format Null;" 2>/dev/null; do From 05b7da78130b21367b69a2cc22a319be11de8207 Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Fri, 7 Jul 2023 10:32:44 -0400 Subject: [PATCH 149/179] add doc note for MongoDB Atlas --- docs/en/engines/table-engines/integrations/mongodb.md | 9 +++++++++ docs/en/sql-reference/table-functions/mongodb.md | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/docs/en/engines/table-engines/integrations/mongodb.md b/docs/en/engines/table-engines/integrations/mongodb.md index 912f81573dbb..f87e8da8b5b0 100644 --- a/docs/en/engines/table-engines/integrations/mongodb.md +++ b/docs/en/engines/table-engines/integrations/mongodb.md @@ -33,6 +33,15 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name - `options` — MongoDB connection string options (optional parameter). +:::tip +If you are using the MongoDB Atlas cloud offering please add these options: + +``` +'connectTimeoutMS=10000&ssl=true&authSource=admin' +``` + +::: + ## Usage Example {#usage-example} Create a table in ClickHouse which allows to read data from MongoDB collection: diff --git a/docs/en/sql-reference/table-functions/mongodb.md b/docs/en/sql-reference/table-functions/mongodb.md index aad60a7003c6..a483414c0d4b 100644 --- a/docs/en/sql-reference/table-functions/mongodb.md +++ b/docs/en/sql-reference/table-functions/mongodb.md @@ -30,6 +30,14 @@ mongodb(host:port, database, collection, user, password, structure [, options]) - `options` - MongoDB connection string options (optional parameter). +:::tip +If you are using the MongoDB Atlas cloud offering please add these options: + +``` +'connectTimeoutMS=10000&ssl=true&authSource=admin' +``` + +::: **Returned Value** From 0bd16d47be2c2040ab1d6787e953b0c4154ee0a1 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 7 Jul 2023 19:44:20 +0500 Subject: [PATCH 150/179] fix documentation insconsistency about additional_tables_filter during reproduce https://github.com/ClickHouse/ClickHouse/issues/51948 Signed-off-by: Slach --- docs/en/operations/settings/settings.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 5f6cf98646bd..195a9e26b53e 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -17,7 +17,8 @@ Default value: 0. **Example** ``` sql -insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); +INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); +SELECT * FROM table_1; ``` ```response ┌─x─┬─y────┐ @@ -30,7 +31,7 @@ insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); ```sql SELECT * FROM table_1 -SETTINGS additional_table_filters = (('table_1', 'x != 2')) +SETTINGS additional_table_filters = {'table_1': 'x != 2'} ``` ```response ┌─x─┬─y────┐ @@ -50,7 +51,8 @@ Default value: `''`. 
**Example** ``` sql -insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); +INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); +SElECT * FROM table_1; ``` ```response ┌─x─┬─y────┐ From 50ea0855bf622ede96cb9726d5010d03c8dbebf4 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 7 Jul 2023 17:47:09 +0300 Subject: [PATCH 151/179] Update 02439_merge_selecting_partitions.sql (#51862) * Update 02439_merge_selecting_partitions.sql * Update 02439_merge_selecting_partitions.reference * Update 02439_merge_selecting_partitions.reference * fix --- .../0_stateless/02439_merge_selecting_partitions.reference | 1 - .../0_stateless/02439_merge_selecting_partitions.sql | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02439_merge_selecting_partitions.reference b/tests/queries/0_stateless/02439_merge_selecting_partitions.reference index e836994b3aa7..e69de29bb2d1 100644 --- a/tests/queries/0_stateless/02439_merge_selecting_partitions.reference +++ b/tests/queries/0_stateless/02439_merge_selecting_partitions.reference @@ -1 +0,0 @@ -/test/02439/s1/default/block_numbers/123 diff --git a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql index 88ce2834d6b5..3d0c0af84d5a 100644 --- a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql +++ b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql @@ -22,7 +22,9 @@ select sleepEachRow(3) as higher_probability_of_reproducing_the_issue format Nul system flush logs; -- it should not list unneeded partitions where we cannot merge anything -select distinct path from system.zookeeper_log where path like '/test/02439/s1/' || currentDatabase() || '/block_numbers/%' - and op_num in ('List', 'SimpleList', 'FilteredList') and path not like '%/block_numbers/1'; +select * from system.zookeeper_log where path like '/test/02439/s1/' || currentDatabase() || '/block_numbers/%' + and op_num in ('List', 'SimpleList', 'FilteredList') + and path not like '%/block_numbers/1' and path not like '%/block_numbers/123' + and event_time >= now() - interval 1 minute; drop table rmt; From 8266067e1a650453968f278f64e20bd4addc7aa2 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 7 Jul 2023 19:09:55 +0300 Subject: [PATCH 152/179] Fixed style check --- src/Dictionaries/CacheDictionary.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index e27e25ea7c4b..3011151ef007 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -549,12 +549,12 @@ void CacheDictionary::update(CacheDictionaryUpdateUnitPtr Date: Fri, 7 Jul 2023 18:39:20 +0200 Subject: [PATCH 153/179] comments for the tests --- ...nal_block_structure_mismatch_bug.reference | 1 - ...791_final_block_structure_mismatch_bug.sql | 38 ++++++++----------- 2 files changed, 15 insertions(+), 24 deletions(-) diff --git a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference index ca810c46a2db..a8401b1cae8f 100644 --- a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference +++ b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.reference @@ -7,4 +7,3 @@ 1 2 3 -2 diff --git a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql 
b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql index a82e43d81f43..394e3bff87bd 100644 --- a/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql +++ b/tests/queries/0_stateless/02791_final_block_structure_mismatch_bug.sql @@ -17,10 +17,18 @@ INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-02-02 12:12:12')); INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-02-02 12:12:12')); SELECT count(*) FROM test_block_mismatch FINAL; +optimize table test_block_mismatch final; +system stop merges test_block_mismatch; + INSERT INTO test_block_mismatch VALUES (2, toDateTime('2023-01-01 12:12:12')); INSERT INTO test_block_mismatch VALUES (2, toDateTime('2023-01-01 12:12:12')); +-- one lonely part in 2023-02-02 partition and 3 parts in 2023-01-01 partition. +-- lonely part will not be processed by PartsSplitter and 2023-01-01's parts will be - previously this led to the `Block structure mismatch in Pipe::unitePipes` exception. SELECT count(*) FROM test_block_mismatch FINAL; + +-- variations of the test above with slightly modified table definitions + CREATE TABLE test_block_mismatch_sk1 ( a UInt32, @@ -39,10 +47,14 @@ INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-02-02 12:12:12') INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-02-02 12:12:12')); SELECT count(*) FROM test_block_mismatch_sk1 FINAL; +optimize table test_block_mismatch_sk1 final; +system stop merges test_block_mismatch_sk1; + INSERT INTO test_block_mismatch_sk1 VALUES (2, toDateTime('2023-01-01 12:12:12')); INSERT INTO test_block_mismatch_sk1 VALUES (2, toDateTime('2023-01-01 12:12:12')); SELECT count(*) FROM test_block_mismatch_sk1 FINAL; + CREATE TABLE test_block_mismatch_sk2 ( a UInt32, @@ -61,29 +73,9 @@ INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-02-02 12:12:12') INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-02-02 12:12:12')); SELECT count(*) FROM test_block_mismatch_sk2 FINAL; +optimize table test_block_mismatch_sk2 final; +system stop merges test_block_mismatch_sk2; + INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 12:12:12')); INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 12:12:12')); SELECT count(*) FROM test_block_mismatch_sk2 FINAL; - -CREATE TABLE test_block_mismatch_magic_row_dist -( - a UInt32, - b DateTime -) -ENGINE = ReplacingMergeTree -PARTITION BY toYYYYMM(b) -ORDER BY (toDate(b), a); - -INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); -INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); -INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); -INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-02-02 12:12:12')); - -optimize table test_block_mismatch_magic_row_dist final; - -system stop merges test_block_mismatch_magic_row_dist; - -INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-01-01 12:12:12')); -INSERT INTO test_block_mismatch_magic_row_dist VALUES (1, toDateTime('2023-01-01 12:12:12')); - -SELECT count(*) FROM test_block_mismatch_magic_row_dist FINAL; From 93b76c93210bccfda6d6b2413bf07cf48c4f9fa3 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 7 Jul 2023 16:40:47 +0200 Subject: [PATCH 154/179] better logs on shutdown --- base/base/getThreadId.cpp | 33 ++++++++++++++-------- base/base/getThreadId.h | 2 ++ src/Daemon/BaseDaemon.cpp | 1 + 
src/Interpreters/Context.cpp | 41 ++++++++++++++++------------ src/Interpreters/DatabaseCatalog.cpp | 2 ++ 5 files changed, 49 insertions(+), 30 deletions(-) diff --git a/base/base/getThreadId.cpp b/base/base/getThreadId.cpp index b6c22bb8856b..a42d79c5698b 100644 --- a/base/base/getThreadId.cpp +++ b/base/base/getThreadId.cpp @@ -15,25 +15,34 @@ static thread_local uint64_t current_tid = 0; -uint64_t getThreadId() + +static void setCurrentThreadId() { - if (!current_tid) - { #if defined(OS_ANDROID) - current_tid = gettid(); + current_tid = gettid(); #elif defined(OS_LINUX) - current_tid = static_cast(syscall(SYS_gettid)); /// This call is always successful. - man gettid + current_tid = static_cast(syscall(SYS_gettid)); /// This call is always successful. - man gettid #elif defined(OS_FREEBSD) - current_tid = pthread_getthreadid_np(); + current_tid = pthread_getthreadid_np(); #elif defined(OS_SUNOS) - // On Solaris-derived systems, this returns the ID of the LWP, analogous - // to a thread. - current_tid = static_cast(pthread_self()); + // On Solaris-derived systems, this returns the ID of the LWP, analogous + // to a thread. + current_tid = static_cast(pthread_self()); #else - if (0 != pthread_threadid_np(nullptr, ¤t_tid)) - throw std::logic_error("pthread_threadid_np returned error"); + if (0 != pthread_threadid_np(nullptr, ¤t_tid)) + throw std::logic_error("pthread_threadid_np returned error"); #endif - } +} + +uint64_t getThreadId() +{ + if (!current_tid) + setCurrentThreadId(); return current_tid; } + +void updateCurrentThreadIdAfterFork() +{ + setCurrentThreadId(); +} diff --git a/base/base/getThreadId.h b/base/base/getThreadId.h index a1b5ff5f3e8d..f90c76029e1b 100644 --- a/base/base/getThreadId.h +++ b/base/base/getThreadId.h @@ -3,3 +3,5 @@ /// Obtain thread id from OS. The value is cached in thread local variable. uint64_t getThreadId(); + +void updateCurrentThreadIdAfterFork(); diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 6d29523a3548..d63e99764372 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -1101,6 +1101,7 @@ void BaseDaemon::setupWatchdog() if (0 == pid) { + updateCurrentThreadIdAfterFork(); logger().information("Forked a child process to watch"); #if defined(OS_LINUX) if (0 != prctl(PR_SET_PDEATHSIG, SIGKILL)) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 7482450d529f..7b3d419cce40 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -176,6 +176,15 @@ namespace ErrorCodes extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH; } +#define SHUTDOWN(log, desc, ptr, method) do \ +{ \ + if (ptr) \ + { \ + LOG_DEBUG(log, "Shutting down " desc); \ + ptr->method; \ + } \ +} while (false) \ + /** Set of known objects (environment), that could be used in query. * Shared (global) part. Order of members (especially, order of destruction) is very important. @@ -479,35 +488,29 @@ struct ContextSharedPart : boost::noncopyable /// Stop periodic reloading of the configuration files. /// This must be done first because otherwise the reloading may pass a changed config /// to some destroyed parts of ContextSharedPart. 
- if (external_dictionaries_loader) - external_dictionaries_loader->enablePeriodicUpdates(false); - if (external_user_defined_executable_functions_loader) - external_user_defined_executable_functions_loader->enablePeriodicUpdates(false); - if (user_defined_sql_objects_loader) - user_defined_sql_objects_loader->stopWatching(); + SHUTDOWN(log, "dictionaries loader", external_dictionaries_loader, enablePeriodicUpdates(false)); + SHUTDOWN(log, "UDFs loader", external_user_defined_executable_functions_loader, enablePeriodicUpdates(false)); + SHUTDOWN(log, "another UDFs loader", user_defined_sql_objects_loader, stopWatching()); + + LOG_TRACE(log, "Shutting down named sessions"); Session::shutdownNamedSessions(); /// Waiting for current backups/restores to be finished. This must be done before `DatabaseCatalog::shutdown()`. - if (backups_worker) - backups_worker->shutdown(); + SHUTDOWN(log, "backups worker", backups_worker, shutdown()); /** After system_logs have been shut down it is guaranteed that no system table gets created or written to. * Note that part changes at shutdown won't be logged to part log. */ - if (system_logs) - system_logs->shutdown(); + SHUTDOWN(log, "system logs", system_logs, shutdown()); + LOG_TRACE(log, "Shutting down database catalog"); DatabaseCatalog::shutdown(); - if (merge_mutate_executor) - merge_mutate_executor->wait(); - if (fetch_executor) - fetch_executor->wait(); - if (moves_executor) - moves_executor->wait(); - if (common_executor) - common_executor->wait(); + SHUTDOWN(log, "merges executor", merge_mutate_executor, wait()); + SHUTDOWN(log, "fetches executor", fetch_executor, wait()); + SHUTDOWN(log, "moves executor", moves_executor, wait()); + SHUTDOWN(log, "common executor", common_executor, wait()); TransactionLog::shutdownIfAny(); @@ -533,10 +536,12 @@ struct ContextSharedPart : boost::noncopyable /// DDLWorker should be deleted without lock, cause its internal thread can /// take it as well, which will cause deadlock. + LOG_TRACE(log, "Shutting down DDLWorker"); delete_ddl_worker.reset(); /// Background operations in cache use background schedule pool. /// Deactivate them before destructing it. + LOG_TRACE(log, "Shutting down caches"); const auto & caches = FileCacheFactory::instance().getAll(); for (const auto & [_, cache] : caches) cache->cache->deactivateBackgroundOperations(); diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 4cb2f6e3b3d8..271330bc64a8 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -227,9 +227,11 @@ void DatabaseCatalog::shutdownImpl() databases_with_delayed_shutdown.push_back(database.second); continue; } + LOG_TRACE(log, "Shutting down database {}", database.first); database.second->shutdown(); } + LOG_TRACE(log, "Shutting down system databases"); for (auto & database : databases_with_delayed_shutdown) { database->shutdown(); From a96874850ec0faaf049cce01feee6c4a572d7961 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 00:55:57 +0200 Subject: [PATCH 155/179] Revert "Merge pull request #48115 from save-my-heart/throw_non_parametric_function" This reverts commit 5f930aeb2619bda8f27f3cfc6ba01ffaf48c3d64, reversing changes made to 35572321a14d617cfd110a48d8d3416615bd75c9. 
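For context, the check being removed rejected parameters on non-parametric functions and on SQL user-defined functions. The test deleted below covered exactly that; its queries, kept here for quick reference, were:

```sql
-- From the deleted test 02701_non_parametric_function
-- (the two SELECTs used to fail with serverError 309).
SELECT * FROM system.numbers WHERE number > toUInt64(10)(number) LIMIT 10;

CREATE FUNCTION IF NOT EXISTS sum_udf as (x, y) -> (x + y);
SELECT sum_udf(1)(1, 2);
DROP FUNCTION IF EXISTS sum_udf;
```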
--- .../UserDefined/UserDefinedSQLFunctionVisitor.cpp | 7 ------- src/Interpreters/ActionsVisitor.cpp | 7 ------- .../0_stateless/02701_non_parametric_function.reference | 0 .../0_stateless/02701_non_parametric_function.sql | 9 --------- 4 files changed, 23 deletions(-) delete mode 100644 tests/queries/0_stateless/02701_non_parametric_function.reference delete mode 100644 tests/queries/0_stateless/02701_non_parametric_function.sql diff --git a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp index 597e4efe35eb..360d1cdf76ce 100644 --- a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp @@ -20,7 +20,6 @@ namespace DB namespace ErrorCodes { extern const int UNSUPPORTED_METHOD; - extern const int FUNCTION_CANNOT_HAVE_PARAMETERS; } void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast) @@ -139,12 +138,6 @@ ASTPtr UserDefinedSQLFunctionVisitor::tryToReplaceFunction(const ASTFunction & f if (!user_defined_function) return nullptr; - /// All UDFs are not parametric for now. - if (function.parameters) - { - throw Exception(ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", function.name); - } - const auto & function_arguments_list = function.children.at(0)->as(); auto & function_arguments = function_arguments_list->children; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 10502b7e66d1..01f2d4cf22e7 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -78,7 +78,6 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; - extern const int FUNCTION_CANNOT_HAVE_PARAMETERS; } static NamesAndTypesList::iterator findColumn(const String & name, NamesAndTypesList & cols) @@ -1106,12 +1105,6 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & } } - /// Normal functions are not parametric for now. - if (node.parameters) - { - throw Exception(ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", node.name); - } - Names argument_names; DataTypes argument_types; bool arguments_present = true; diff --git a/tests/queries/0_stateless/02701_non_parametric_function.reference b/tests/queries/0_stateless/02701_non_parametric_function.reference deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/queries/0_stateless/02701_non_parametric_function.sql b/tests/queries/0_stateless/02701_non_parametric_function.sql deleted file mode 100644 index 5261fa7b0821..000000000000 --- a/tests/queries/0_stateless/02701_non_parametric_function.sql +++ /dev/null @@ -1,9 +0,0 @@ --- Tags: no-parallel - -SELECT * FROM system.numbers WHERE number > toUInt64(10)(number) LIMIT 10; -- { serverError 309 } - -CREATE FUNCTION IF NOT EXISTS sum_udf as (x, y) -> (x + y); - -SELECT sum_udf(1)(1, 2); -- { serverError 309 } - -DROP FUNCTION IF EXISTS sum_udf; From f4696d762cb3e15878b99c51bcad9ee15a8972c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 00:56:42 +0200 Subject: [PATCH 156/179] Revert "Merge pull request #49419 from ClickHouse/fix-function-parameter-exception" This reverts commit b921476a3be536b17b967391cefab3888c0c96b2, reversing changes made to 7896d307379bc813665fa5b11d08c202ea67f4fb. 
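Note that the reverted check carried a special case (is_executable_udf), because executable user-defined functions may legitimately take constant parameters; that behaviour is documented in the next commit. As a sketch only, assuming an executable UDF named test_function_parameter_python configured with a String parameter as in that documentation example, the parametric call syntax in question looks like:

```sql
-- Hypothetical invocation; the actual UDF configuration lives in the server's XML config.
-- The first parenthesized list is the constant parameter, the second is the argument list.
SELECT test_function_parameter_python(1)(2);
```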
--- src/Analyzer/Passes/QueryAnalysisPass.cpp | 15 --------------- tests/analyzer_tech_debt.txt | 1 + 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/src/Analyzer/Passes/QueryAnalysisPass.cpp b/src/Analyzer/Passes/QueryAnalysisPass.cpp index 163092f1b7f7..da8933aabaae 100644 --- a/src/Analyzer/Passes/QueryAnalysisPass.cpp +++ b/src/Analyzer/Passes/QueryAnalysisPass.cpp @@ -116,7 +116,6 @@ namespace ErrorCodes extern const int UNKNOWN_TABLE; extern const int ILLEGAL_COLUMN; extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH; - extern const int FUNCTION_CANNOT_HAVE_PARAMETERS; } /** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h first. @@ -4897,11 +4896,6 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi lambda_expression_untyped->formatASTForErrorMessage(), scope.scope_node->formatASTForErrorMessage()); - if (!parameters.empty()) - { - throw Exception(ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", function_node.formatASTForErrorMessage()); - } - auto lambda_expression_clone = lambda_expression_untyped->clone(); IdentifierResolveScope lambda_scope(lambda_expression_clone, &scope /*parent_scope*/); @@ -5018,12 +5012,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi } FunctionOverloadResolverPtr function = UserDefinedExecutableFunctionFactory::instance().tryGet(function_name, scope.context, parameters); - bool is_executable_udf = false; if (!function) function = FunctionFactory::instance().tryGet(function_name, scope.context); - else - is_executable_udf = true; if (!function) { @@ -5074,12 +5065,6 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi return result_projection_names; } - /// Executable UDFs may have parameters. They are checked in UserDefinedExecutableFunctionFactory. - if (!parameters.empty() && !is_executable_udf) - { - throw Exception(ErrorCodes::FUNCTION_CANNOT_HAVE_PARAMETERS, "Function {} is not parametric", function_name); - } - /** For lambda arguments we need to initialize lambda argument types DataTypeFunction using `getLambdaArgumentTypes` function. * Then each lambda arguments are initialized with columns, where column source is lambda. * This information is important for later steps of query processing. diff --git a/tests/analyzer_tech_debt.txt b/tests/analyzer_tech_debt.txt index f7cc13dd2e21..f838a19940ae 100644 --- a/tests/analyzer_tech_debt.txt +++ b/tests/analyzer_tech_debt.txt @@ -111,6 +111,7 @@ 00917_multiple_joins_denny_crane 00725_join_on_bug_1 00636_partition_key_parts_pruning +00261_storage_aliases_and_array_join 01825_type_json_multiple_files 01281_group_by_limit_memory_tracking 02723_zookeeper_name From 19072c9b475fef191dfd18929cc81c25e8115026 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 02:03:23 +0300 Subject: [PATCH 157/179] Corrent example about parametric executable UDFs. --- docs/en/sql-reference/functions/udf.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/udf.md b/docs/en/sql-reference/functions/udf.md index 9c6b1b0c66b8..51734beed038 100644 --- a/docs/en/sql-reference/functions/udf.md +++ b/docs/en/sql-reference/functions/udf.md @@ -171,12 +171,13 @@ Result: └──────────────────────────────┘ ``` -Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type). 
+Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type). It also requires the `execute_direct` option (to ensure no shell argument expansion vulnerability). File `test_function_parameter_python.xml` (`/etc/clickhouse-server/test_function_parameter_python.xml` with default path settings). ```xml executable + true test_function_parameter_python String From 6990f078a0bf87f23d478e83c51001b7cb0d4b8a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Fri, 7 Jul 2023 19:19:30 -0400 Subject: [PATCH 158/179] cleaner way --- src/Daemon/BaseDaemon.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index a75aac7a08ec..af2d355d335c 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -154,10 +154,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context) writePODBinary(*info, out); writePODBinary(signal_context, out); writePODBinary(stack_trace, out); - if (Exception::enable_job_stack_trace) - writeVectorBinary(Exception::thread_frame_pointers, out); - else - writeVarUInt(0, out); + writeVectorBinary(Exception::enable_job_stack_trace ? Exception::thread_frame_pointers : std::vector{}, out); writeBinary(static_cast(getThreadId()), out); writePODBinary(current_thread, out); From 9144a2dbb2a17af72304267edfe5a81ee7daa0b9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 05:23:13 +0200 Subject: [PATCH 159/179] Fix unrelated messages from LSan in clickhouse-client --- tests/clickhouse-test | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 4860ce0fac9a..95470f779875 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -57,6 +57,8 @@ MESSAGES_TO_RETRY = [ "ConnectionPoolWithFailover: Connection failed at try", "DB::Exception: New table appeared in database being dropped or detached. Try again", "is already started to be removing by another replica right now", + # This is from LSan, and it indicates its own internal problem: + "Unable to get registers from thread", ] MAX_RETRIES = 3 From c828db572078bb68bbcd20c6850073030d4addac Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 05:57:23 +0200 Subject: [PATCH 160/179] Allow OOM in AST Fuzzer with Sanitizers --- docker/test/fuzzer/run-fuzzer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index d2c8de7a211c..5cda0831a847 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -291,7 +291,7 @@ quit if [ "$server_died" == 1 ] then # The server has died. - if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*' server.log > description.txt + if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*|.*Child process was terminated by signal 9.*' server.log > description.txt then echo "Lost connection to server. See the logs." 
> description.txt fi From 1bdcd29da2bfc4cab02a0db5dedeb7d0515ac49c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 06:02:38 +0200 Subject: [PATCH 161/179] Disable one test under Analyzer --- tests/analyzer_tech_debt.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/analyzer_tech_debt.txt b/tests/analyzer_tech_debt.txt index f7cc13dd2e21..a10f72e743a8 100644 --- a/tests/analyzer_tech_debt.txt +++ b/tests/analyzer_tech_debt.txt @@ -128,3 +128,4 @@ 02784_parallel_replicas_automatic_disabling 02581_share_big_sets_between_mutation_tasks_long 02581_share_big_sets_between_multiple_mutations_tasks_long +00992_system_parts_race_condition_zookeeper_long From adbd85b975aba4618ddf2a934422559410eeea48 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 06:26:44 +0200 Subject: [PATCH 162/179] Fix Docker --- tests/integration/conftest.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 749f4aa1cde6..5933883f7b0d 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -42,6 +42,13 @@ def cleanup_environment(): logging.debug(f"Docker ps before start:{r.stdout}") else: logging.debug(f"No running containers") + + logging.debug("Pruning Docker networks") + run_and_check( + ["docker network prune"], + shell=True, + nothrow=True, + ) except Exception as e: logging.exception(f"cleanup_environment:{str(e)}") pass From cdbf279b65cca972ce63dd7fd835d2b46359f7f3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 06:46:28 +0200 Subject: [PATCH 163/179] Fix test 01825_type_json_from_map --- tests/queries/0_stateless/01825_type_json_from_map.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01825_type_json_from_map.sql b/tests/queries/0_stateless/01825_type_json_from_map.sql index 2480aca16679..51e60843a1ae 100644 --- a/tests/queries/0_stateless/01825_type_json_from_map.sql +++ b/tests/queries/0_stateless/01825_type_json_from_map.sql @@ -1,4 +1,5 @@ --- Tags: no-fasttest +-- Tags: no-fasttest, no-random-merge-tree-settings +-- For example, it is 4 times slower with --merge_max_block_size=5967 --index_granularity=55 --min_bytes_for_wide_part=847510133 DROP TABLE IF EXISTS t_json; DROP TABLE IF EXISTS t_map; From 0b0caec9c435aaf0df3e01ef64bf06397d11f2ce Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 07:51:17 +0300 Subject: [PATCH 164/179] Update Context.cpp --- src/Interpreters/Context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 7b3d419cce40..8df8723123f4 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -181,7 +181,7 @@ namespace ErrorCodes if (ptr) \ { \ LOG_DEBUG(log, "Shutting down " desc); \ - ptr->method; \ + (ptr)->method; \ } \ } while (false) \ From 4de02c243816f907643eefbbe4743861660b6d99 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 07:04:33 +0200 Subject: [PATCH 165/179] Fix test 02354_distributed_with_external_aggregation_memory_usage --- ...distributed_with_external_aggregation_memory_usage.sql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql index 548660e36b1e..c8ec40bb0a73 100644 --- 
a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql +++ b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql @@ -1,5 +1,7 @@ -- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-s3-storage +DROP TABLE IF EXISTS t_2354_dist_with_external_aggr; + create table t_2354_dist_with_external_aggr(a UInt64, b String, c FixedString(100)) engine = MergeTree order by tuple(); insert into t_2354_dist_with_external_aggr select number, toString(number) as s, toFixedString(s, 100) from numbers_mt(5e7); @@ -15,8 +17,12 @@ set max_bytes_before_external_group_by = '2G', -- whole aggregation state of local aggregation uncompressed is 5.8G -- it is hard to provide an accurate estimation for memory usage, so 4G is just the actual value taken from the logs + delta +-- also avoid using localhost, so the queries will go over separate connections +-- (otherwise the memory usage for merge will be counted together with the localhost query) select a, b, c, sum(a) as s -from remote('127.0.0.{1,2}', currentDatabase(), t_2354_dist_with_external_aggr) +from remote('127.0.0.{2,3}', currentDatabase(), t_2354_dist_with_external_aggr) group by a, b, c format Null settings max_memory_usage = '4Gi'; + +DROP TABLE t_2354_dist_with_external_aggr; From df31034820c245030b16fddd7b9b3e06c07b0d51 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 7 Jul 2023 20:29:59 +0200 Subject: [PATCH 166/179] rollback merge tasks on exception --- src/Storages/MergeTree/IExecutableTask.h | 11 +++++++---- .../MergeTree/MergeFromLogEntryTask.cpp | 2 +- .../MergeTree/MergeFromLogEntryTask.h | 2 +- .../MergeTree/MergePlainMergeTreeTask.cpp | 19 ++++++++++++++++--- .../MergeTree/MergePlainMergeTreeTask.h | 5 +++-- .../MergeTree/MergeTreeBackgroundExecutor.cpp | 17 ++++++++++------- .../MergeTree/MutateFromLogEntryTask.cpp | 2 +- .../MergeTree/MutateFromLogEntryTask.h | 2 +- .../MergeTree/MutatePlainMergeTreeTask.cpp | 4 ++-- .../MergeTree/MutatePlainMergeTreeTask.h | 5 +++-- src/Storages/MergeTree/MutateTask.cpp | 15 +++++++++------ .../ReplicatedMergeMutateTaskBase.cpp | 2 +- .../MergeTree/ReplicatedMergeMutateTaskBase.h | 3 ++- .../MergeTree/tests/gtest_executor.cpp | 10 ++++++---- src/Storages/StorageMergeTree.cpp | 2 +- 15 files changed, 64 insertions(+), 37 deletions(-) diff --git a/src/Storages/MergeTree/IExecutableTask.h b/src/Storages/MergeTree/IExecutableTask.h index d0c2d4a840e9..738056e0ea00 100644 --- a/src/Storages/MergeTree/IExecutableTask.h +++ b/src/Storages/MergeTree/IExecutableTask.h @@ -32,8 +32,9 @@ class IExecutableTask using TaskResultCallback = std::function; virtual bool executeStep() = 0; virtual void onCompleted() = 0; - virtual StorageID getStorageID() = 0; - virtual Priority getPriority() = 0; + virtual StorageID getStorageID() const = 0; + virtual String getQueryId() const = 0; + virtual Priority getPriority() const = 0; virtual ~IExecutableTask() = default; }; @@ -63,12 +64,14 @@ class ExecutableLambdaAdapter : public IExecutableTask, boost::noncopyable } void onCompleted() override { job_result_callback(!res); } - StorageID getStorageID() override { return id; } - Priority getPriority() override + StorageID getStorageID() const override { return id; } + Priority getPriority() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "getPriority() method is not supported by LambdaAdapter"); } + String getQueryId() const override { return id.getShortName() + "::lambda"; } + private: bool res = false; std::function job_to_execute; 
diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 17582e7df98b..9f54c554c85a 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -287,7 +287,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() task_context = Context::createCopy(storage.getContext()); task_context->makeQueryContext(); - task_context->setCurrentQueryId(""); + task_context->setCurrentQueryId(getQueryId()); /// Add merge to list merge_mutate_entry = storage.getContext()->getMergeList().insert( diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.h b/src/Storages/MergeTree/MergeFromLogEntryTask.h index 62908f79fb43..16e69a568bae 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.h +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.h @@ -24,7 +24,7 @@ class MergeFromLogEntryTask : public ReplicatedMergeMutateTaskBase StorageReplicatedMergeTree & storage_, IExecutableTask::TaskResultCallback & task_result_callback_); - Priority getPriority() override { return priority; } + Priority getPriority() const override { return priority; } protected: /// Both return false if we can't execute merge. diff --git a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp index 9302bdf11de4..3f5753a0c95d 100644 --- a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp @@ -3,8 +3,10 @@ #include #include #include +#include #include #include +#include namespace DB @@ -16,7 +18,7 @@ namespace ErrorCodes } -StorageID MergePlainMergeTreeTask::getStorageID() +StorageID MergePlainMergeTreeTask::getStorageID() const { return storage.getStorageID(); } @@ -77,7 +79,6 @@ bool MergePlainMergeTreeTask::executeStep() throw Exception(ErrorCodes::LOGICAL_ERROR, "Task with state SUCCESS mustn't be executed again"); } } - return false; } @@ -145,16 +146,28 @@ void MergePlainMergeTreeTask::finish() storage.merger_mutator.renameMergedTemporaryPart(new_part, future_part->parts, txn, transaction); transaction.commit(); + ThreadFuzzer::maybeInjectSleep(); + ThreadFuzzer::maybeInjectMemoryLimitException(); + write_part_log({}); storage.incrementMergedPartsProfileEvent(new_part->getType()); transfer_profile_counters_to_initial_query(); + + if (auto txn_ = txn_holder.getTransaction()) + { + /// Explicitly commit the transaction if we own it (it's a background merge, not OPTIMIZE) + TransactionLog::instance().commitTransaction(txn_, /* throw_on_unknown_status */ false); + ThreadFuzzer::maybeInjectSleep(); + ThreadFuzzer::maybeInjectMemoryLimitException(); + } + } ContextMutablePtr MergePlainMergeTreeTask::createTaskContext() const { auto context = Context::createCopy(storage.getContext()); context->makeQueryContext(); - auto queryId = storage.getStorageID().getShortName() + "::" + future_part->name; + auto queryId = getQueryId(); context->setCurrentQueryId(queryId); return context; } diff --git a/src/Storages/MergeTree/MergePlainMergeTreeTask.h b/src/Storages/MergeTree/MergePlainMergeTreeTask.h index 95df8c90c9b1..5cc9c0e50d36 100644 --- a/src/Storages/MergeTree/MergePlainMergeTreeTask.h +++ b/src/Storages/MergeTree/MergePlainMergeTreeTask.h @@ -39,8 +39,9 @@ class MergePlainMergeTreeTask : public IExecutableTask bool executeStep() override; void onCompleted() override; - StorageID getStorageID() override; - Priority getPriority() override { return priority; } + StorageID getStorageID() const 
override; + Priority getPriority() const override { return priority; } + String getQueryId() const override { return getStorageID().getShortName() + "::" + merge_mutate_entry->future_part->name; } void setCurrentTransaction(MergeTreeTransactionHolder && txn_holder_, MergeTreeTransactionPtr && txn_) { diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp index d4f8d1140a24..6eab43371621 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp @@ -136,7 +136,7 @@ bool MergeTreeBackgroundExecutor::trySchedule(ExecutableTaskPtr task) return true; } -void printExceptionWithRespectToAbort(Poco::Logger * log) +void printExceptionWithRespectToAbort(Poco::Logger * log, const String & query_id) { std::exception_ptr ex = std::current_exception(); @@ -155,14 +155,14 @@ void printExceptionWithRespectToAbort(Poco::Logger * log) if (e.code() == ErrorCodes::ABORTED) LOG_DEBUG(log, getExceptionMessageAndPattern(e, /* with_stacktrace */ false)); else - tryLogCurrentException(__PRETTY_FUNCTION__); + tryLogCurrentException(log, "Exception while executing background task {" + query_id + "}"); }); } catch (...) { NOEXCEPT_SCOPE({ ALLOW_ALLOCATIONS_IN_SCOPE; - tryLogCurrentException(__PRETTY_FUNCTION__); + tryLogCurrentException(log, "Exception while executing background task {" + query_id + "}"); }); } } @@ -239,7 +239,9 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) has_tasks.notify_one(); }; - auto release_task = [this, &erase_from_active, &on_task_done](TaskRuntimeDataPtr && item_) + String query_id; + + auto release_task = [this, &erase_from_active, &on_task_done, &query_id](TaskRuntimeDataPtr && item_) { std::lock_guard guard(mutex); @@ -256,7 +258,7 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) } catch (...) { - printExceptionWithRespectToAbort(log); + printExceptionWithRespectToAbort(log, query_id); } on_task_done(std::move(item_)); @@ -267,11 +269,12 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) try { ALLOW_ALLOCATIONS_IN_SCOPE; + item->task->getQueryId(); need_execute_again = item->task->executeStep(); } catch (...) { - printExceptionWithRespectToAbort(log); + printExceptionWithRespectToAbort(log, query_id); /// Release the task with exception context. /// An exception context is needed to proper delete write buffers without finalization release_task(std::move(item)); @@ -298,7 +301,7 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) } catch (...) 
{ - printExceptionWithRespectToAbort(log); + printExceptionWithRespectToAbort(log, query_id); on_task_done(std::move(item)); return; } diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index ba55fb400ca4..6cb9d50436e6 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -191,7 +191,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() task_context = Context::createCopy(storage.getContext()); task_context->makeQueryContext(); - task_context->setCurrentQueryId(""); + task_context->setCurrentQueryId(getQueryId()); merge_mutate_entry = storage.getContext()->getMergeList().insert( storage.getStorageID(), diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.h b/src/Storages/MergeTree/MutateFromLogEntryTask.h index b6d3f5d4b6b6..42d8307e948e 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.h +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.h @@ -31,7 +31,7 @@ class MutateFromLogEntryTask : public ReplicatedMergeMutateTaskBase {} - Priority getPriority() override { return priority; } + Priority getPriority() const override { return priority; } private: diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 3180431d31be..bf8e879e3d07 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -13,7 +13,7 @@ namespace ErrorCodes } -StorageID MutatePlainMergeTreeTask::getStorageID() +StorageID MutatePlainMergeTreeTask::getStorageID() const { return storage.getStorageID(); } @@ -137,7 +137,7 @@ ContextMutablePtr MutatePlainMergeTreeTask::createTaskContext() const { auto context = Context::createCopy(storage.getContext()); context->makeQueryContext(); - auto queryId = storage.getStorageID().getShortName() + "::" + future_part->name; + auto queryId = getQueryId(); context->setCurrentQueryId(queryId); return context; } diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.h b/src/Storages/MergeTree/MutatePlainMergeTreeTask.h index bd03c276256d..ef11780a873f 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.h +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.h @@ -41,8 +41,9 @@ class MutatePlainMergeTreeTask : public IExecutableTask bool executeStep() override; void onCompleted() override; - StorageID getStorageID() override; - Priority getPriority() override { return priority; } + StorageID getStorageID() const override; + Priority getPriority() const override { return priority; } + String getQueryId() const override { return getStorageID().getShortName() + "::" + merge_mutate_entry->future_part->name; } private: diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index f4a071b8f279..be5128847569 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -884,8 +884,9 @@ class MergeProjectionPartsTask : public IExecutableTask } void onCompleted() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } - StorageID getStorageID() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } - Priority getPriority() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + StorageID getStorageID() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + Priority getPriority() const override { throw 
Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + String getQueryId() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } bool executeStep() override { @@ -1206,8 +1207,9 @@ class MutateAllPartColumnsTask : public IExecutableTask explicit MutateAllPartColumnsTask(MutationContextPtr ctx_) : ctx(ctx_) {} void onCompleted() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } - StorageID getStorageID() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } - Priority getPriority() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + StorageID getStorageID() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + Priority getPriority() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + String getQueryId() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } bool executeStep() override { @@ -1434,8 +1436,9 @@ class MutateSomePartColumnsTask : public IExecutableTask explicit MutateSomePartColumnsTask(MutationContextPtr ctx_) : ctx(ctx_) {} void onCompleted() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } - StorageID getStorageID() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } - Priority getPriority() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + StorageID getStorageID() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + Priority getPriority() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } + String getQueryId() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); } bool executeStep() override { diff --git a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp index 61356558e161..b4748ee77ea0 100644 --- a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp @@ -16,7 +16,7 @@ namespace ErrorCodes extern const int PART_IS_TEMPORARILY_LOCKED; } -StorageID ReplicatedMergeMutateTaskBase::getStorageID() +StorageID ReplicatedMergeMutateTaskBase::getStorageID() const { return storage.getStorageID(); } diff --git a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h index 1e7f98342458..ba514f11f209 100644 --- a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h +++ b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h @@ -33,7 +33,8 @@ class ReplicatedMergeMutateTaskBase : public IExecutableTask ~ReplicatedMergeMutateTaskBase() override = default; void onCompleted() override; - StorageID getStorageID() override; + StorageID getStorageID() const override; + String getQueryId() const override { return getStorageID().getShortName() + "::" + selected_entry->log_entry->new_part_name; } bool executeStep() override; protected: diff --git a/src/Storages/MergeTree/tests/gtest_executor.cpp b/src/Storages/MergeTree/tests/gtest_executor.cpp index 5815b74284ab..6f34eb4dfbd2 100644 --- a/src/Storages/MergeTree/tests/gtest_executor.cpp +++ b/src/Storages/MergeTree/tests/gtest_executor.cpp @@ -39,7 +39,7 @@ class FakeExecutableTask : public IExecutableTask return false; } - StorageID getStorageID() override + StorageID getStorageID() const override { return {"test", name}; } @@ -51,7 +51,8 @@ class FakeExecutableTask : public IExecutableTask throw 
std::runtime_error("Unlucky..."); } - Priority getPriority() override { return {}; } + Priority getPriority() const override { return {}; } + String getQueryId() const override { return {}; } private: std::mt19937 generator; @@ -79,14 +80,15 @@ class LambdaExecutableTask : public IExecutableTask return --step_count; } - StorageID getStorageID() override + StorageID getStorageID() const override { return {"test", name}; } void onCompleted() override {} - Priority getPriority() override { return priority; } + Priority getPriority() const override { return priority; } + String getQueryId() const override { return "test::lambda"; } private: String name; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 4c0c0c8e3fa8..add1d112c1aa 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1269,7 +1269,7 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign { /// TODO Transactions: avoid beginning transaction if there is nothing to merge. txn = TransactionLog::instance().beginTransaction(); - transaction_for_merge = MergeTreeTransactionHolder{txn, /* autocommit = */ true}; + transaction_for_merge = MergeTreeTransactionHolder{txn, /* autocommit = */ false}; } bool has_mutations = false; From 44ae3a0986c941f234a7cb63468e77b626d10713 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Sat, 8 Jul 2023 14:58:38 +0200 Subject: [PATCH 167/179] fix a bug in projections --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 13 ++++++++++++- src/Storages/MergeTree/IMergeTreeDataPart.h | 9 ++++++++- src/Storages/MergeTree/MergeTreeData.cpp | 14 +++++++++++++- src/Storages/MergeTree/MutateTask.cpp | 2 +- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 4 ++-- src/Storages/StorageMergeTree.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- 7 files changed, 38 insertions(+), 8 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index b95918648697..9309f0d4df67 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -312,15 +312,20 @@ IMergeTreeDataPart::IMergeTreeDataPart( const IMergeTreeDataPart * parent_part_) : DataPartStorageHolder(data_part_storage_) , storage(storage_) - , name(name_) + , mutable_name(name_) + , name(mutable_name) , info(info_) , index_granularity_info(storage_, part_type_) , part_type(part_type_) , parent_part(parent_part_) + , parent_part_name(parent_part ? 
parent_part->name : "") , use_metadata_cache(storage.use_metadata_cache) { if (parent_part) + { + chassert(parent_part_name.starts_with(parent_part->info.partition_id)); /// Make sure there's no prefix state = MergeTreeDataPartState::Active; + } incrementStateMetric(state); incrementTypeMetric(part_type); @@ -337,6 +342,12 @@ IMergeTreeDataPart::~IMergeTreeDataPart() decrementTypeMetric(part_type); } +void IMergeTreeDataPart::setName(const String & new_name) +{ + mutable_name = new_name; + for (auto & proj_part : projection_parts) + proj_part.second->parent_part_name = new_name; +} String IMergeTreeDataPart::getNewName(const MergeTreePartInfo & new_part_info) const { diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 92dbe0840811..2c0cf37b3a57 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -200,9 +200,14 @@ class IMergeTreeDataPart : public std::enable_shared_from_this> & getProjectionParts() const { return projection_parts; } @@ -519,6 +525,7 @@ class IMergeTreeDataPart : public std::enable_shared_from_this> projection_parts; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index b7fde55880e3..f81726863b29 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -7455,7 +7455,19 @@ void MergeTreeData::reportBrokenPart(MergeTreeData::DataPartPtr data_part) const return; if (data_part->isProjectionPart()) - data_part = data_part->getParentPart()->shared_from_this(); + { + String parent_part_name = data_part->getParentPartName(); + auto parent_part = getPartIfExists(parent_part_name, {DataPartState::PreActive, DataPartState::Active, DataPartState::Outdated}); + + if (!parent_part) + { + LOG_WARNING(log, "Did not find parent part {} for potentially broken projection part {}", + parent_part_name, data_part->getDataPartStorage().getFullPath()); + return; + } + + data_part = parent_part; + } if (data_part->getDataPartStorage().isBroken()) { diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index f4a071b8f279..41f767cc4de9 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -917,7 +917,7 @@ class MergeProjectionPartsTask : public IExecutableTask { LOG_DEBUG(log, "Merged a projection part in level {}", current_level); selected_parts[0]->renameTo(projection.name + ".proj", true); - selected_parts[0]->name = projection.name; + selected_parts[0]->setName(projection.name); selected_parts[0]->is_temp = false; ctx->new_data_part->addProjectionPart(name, std::move(selected_parts[0])); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 4128654a632e..22e2ab945eb9 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -788,7 +788,7 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: part->info.level = 0; part->info.mutation = 0; - part->name = part->getNewName(part->info); + part->setName(part->getNewName(part->info)); StorageReplicatedMergeTree::LogEntry log_entry; @@ -914,7 +914,7 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: /// Note that it may also appear on filesystem right now in PreActive state due to concurrent inserts of the same data. /// It will be checked when we will try to rename directory. 
- part->name = existing_part_name; + part->setName(existing_part_name); part->info = MergeTreePartInfo::fromPartName(existing_part_name, storage.format_version); /// Used only for exception messages. block_number = part->info.min_block; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 4c0c0c8e3fa8..d427a857f079 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -2260,7 +2260,7 @@ void StorageMergeTree::fillNewPartName(MutableDataPartPtr & part, DataPartsLock { part->info.min_block = part->info.max_block = increment.get(); part->info.mutation = 0; - part->name = part->getNewName(part->info); + part->setName(part->getNewName(part->info)); } } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 2da18f69bafb..8a21da69460a 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -9262,7 +9262,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP } MergeTreeData::MutableDataPartPtr new_data_part = createEmptyPart(new_part_info, partition, lost_part_name, NO_TRANSACTION_PTR); - new_data_part->name = lost_part_name; + new_data_part->setName(lost_part_name); try { From 85531f32cfb5339c45dade1b84c2a20f0a694cfe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 19:32:44 +0300 Subject: [PATCH 168/179] Update 02804_clusterAllReplicas_insert.sql --- tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql b/tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql index 05bda19eb9e9..c39d9e7d78bd 100644 --- a/tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql +++ b/tests/queries/0_stateless/02804_clusterAllReplicas_insert.sql @@ -3,3 +3,4 @@ create table data (key Int) engine=Memory(); -- NOTE: internal_replication is false, so INSERT will be done only into one shard insert into function clusterAllReplicas(test_cluster_two_shards, currentDatabase(), data, rand()) values (2); select * from data order by key; +drop table data; From 2a8c7d0ea23e2b7a41d03d32b0fb44513fa309e0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 8 Jul 2023 21:52:19 +0300 Subject: [PATCH 169/179] Update src/Parsers/ParserCreateQuery.cpp Co-authored-by: Nikolay Degterinsky <43110995+evillique@users.noreply.github.com> --- src/Parsers/ParserCreateQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index c4c02ab74177..415d3321eb5a 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -304,7 +304,7 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E for (const auto & elem : list->children) { - if (auto *cd = elem->as()) + if (auto * cd = elem->as()) { if (cd->primary_key_specifier) { From a10aa9ad50db5bd3b95a7ebe4ccce4bf10c8e1f6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 3 May 2023 10:09:11 +0200 Subject: [PATCH 170/179] Force libunwind usage (removes gcc_eh support) libunwind is reentrant and signal safe, and works faster than gcc_eh (plus it has some custom patches for problems that have been found during its usage in ClickHouse).
gcc_eh may be missing in the system (if gcc was not installed), and even if it exists ClickHouse uses -nodefaultlibs, so some care should be taken to make it work. Also, this library is tiny and there shouldn't be any problem requiring it always (there is already a tendency to require some contrib libraries, e.g. poco). Signed-off-by: Azat Khuzhin --- CMakeLists.txt | 1 - cmake/darwin/default_libs.cmake | 1 + cmake/target.cmake | 1 - cmake/unwind.cmake | 14 +------------- contrib/jemalloc-cmake/CMakeLists.txt | 17 +++++++---------- contrib/libcxx-cmake/CMakeLists.txt | 4 +--- contrib/libcxxabi-cmake/CMakeLists.txt | 6 ++---- docker/test/fasttest/run.sh | 1 - docs/en/development/build-cross-riscv.md | 2 +- programs/server/Server.cpp | 6 +----- src/Common/QueryProfiler.cpp | 8 ++++---- src/Common/QueryProfiler.h | 4 ++-- src/Common/StackTrace.cpp | 9 +-------- src/Common/config.h.in | 1 - .../System/StorageSystemBuildOptions.cpp.in | 1 - 15 files changed, 21 insertions(+), 55 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 06ee98b5ee1d..45c3c422d7a3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -87,7 +87,6 @@ if (ENABLE_FUZZING) set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF) set (ENABLE_LIBRARIES 0) set (ENABLE_SSL 1) - set (USE_UNWIND ON) set (ENABLE_EMBEDDED_COMPILER 0) set (ENABLE_EXAMPLES 0) set (ENABLE_UTILS 0) diff --git a/cmake/darwin/default_libs.cmake b/cmake/darwin/default_libs.cmake index 812847e62010..42b8473cb755 100644 --- a/cmake/darwin/default_libs.cmake +++ b/cmake/darwin/default_libs.cmake @@ -15,6 +15,7 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) +include (cmake/unwind.cmake) include (cmake/cxx.cmake) link_libraries(global-group) diff --git a/cmake/target.cmake b/cmake/target.cmake index 0791da87bf0a..ffab08f1103b 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -40,7 +40,6 @@ if (CMAKE_CROSSCOMPILING) set (OPENSSL_NO_ASM ON CACHE INTERNAL "") set (ENABLE_JEMALLOC ON CACHE INTERNAL "") set (ENABLE_PARQUET OFF CACHE INTERNAL "") - set (USE_UNWIND OFF CACHE INTERNAL "") set (ENABLE_GRPC OFF CACHE INTERNAL "") set (ENABLE_HDFS OFF CACHE INTERNAL "") set (ENABLE_MYSQL OFF CACHE INTERNAL "") diff --git a/cmake/unwind.cmake b/cmake/unwind.cmake index c9f5f30a5d6b..84e4f01b7523 100644 --- a/cmake/unwind.cmake +++ b/cmake/unwind.cmake @@ -1,13 +1 @@ -option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES}) - -if (USE_UNWIND) - add_subdirectory(contrib/libunwind-cmake) - set (UNWIND_LIBRARIES unwind) - set (EXCEPTION_HANDLING_LIBRARY ${UNWIND_LIBRARIES}) - - message (STATUS "Using libunwind: ${UNWIND_LIBRARIES}") -else () - set (EXCEPTION_HANDLING_LIBRARY gcc_eh) -endif () - -message (STATUS "Using exception handler: ${EXCEPTION_HANDLING_LIBRARY}") +add_subdirectory(contrib/libunwind-cmake) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 97f723bb5400..20025dfc63e0 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -170,16 +170,13 @@ endif () target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1) -if (USE_UNWIND) - # jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++. - # The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`. - # At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing.
- - # ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1). - - target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1) - target_link_libraries (_jemalloc PRIVATE unwind) -endif () +# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++. +# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`. +# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing. +# +# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1). +target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1) +target_link_libraries (_jemalloc PRIVATE unwind) # for RTLD_NEXT target_compile_options(_jemalloc PRIVATE -D_GNU_SOURCE) diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt index a13e4f0f60a6..b7e59e2c9a3b 100644 --- a/contrib/libcxx-cmake/CMakeLists.txt +++ b/contrib/libcxx-cmake/CMakeLists.txt @@ -61,9 +61,7 @@ target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$:$ target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI) # Enable capturing stack traces for all exceptions. -if (USE_UNWIND) - target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1) -endif () +target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1) if (USE_MUSL) target_compile_definitions(cxx PUBLIC -D_LIBCPP_HAS_MUSL_LIBC=1) diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt index 0473527912ea..c7ee34e6e287 100644 --- a/contrib/libcxxabi-cmake/CMakeLists.txt +++ b/contrib/libcxxabi-cmake/CMakeLists.txt @@ -35,12 +35,10 @@ target_include_directories(cxxabi SYSTEM BEFORE ) target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY) target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast. -target_link_libraries(cxxabi PUBLIC ${EXCEPTION_HANDLING_LIBRARY}) +target_link_libraries(cxxabi PUBLIC unwind) # Enable capturing stack traces for all exceptions. -if (USE_UNWIND) - target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1) -endif () +target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1) install( TARGETS cxxabi diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 989ed9d2fbb3..828c73e67819 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -166,7 +166,6 @@ function run_cmake "-DENABLE_UTILS=0" "-DENABLE_EMBEDDED_COMPILER=0" "-DENABLE_THINLTO=0" - "-DUSE_UNWIND=1" "-DENABLE_NURAFT=1" "-DENABLE_SIMDJSON=1" "-DENABLE_JEMALLOC=1" diff --git a/docs/en/development/build-cross-riscv.md b/docs/en/development/build-cross-riscv.md index e3550a046c75..c21353f7f73c 100644 --- a/docs/en/development/build-cross-riscv.md +++ b/docs/en/development/build-cross-riscv.md @@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" ``` bash cd ClickHouse mkdir build-riscv64 -CC=clang-16 CXX=clang++-16 cmake . 
-Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_UNWIND=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF +CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF ninja -C build-riscv64 ``` diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 7fbbcd394461..071f7d3177e1 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1627,7 +1627,7 @@ try /// Init trace collector only after trace_log system table was created /// Disable it if we collect test coverage information, because it will work extremely slow. -#if USE_UNWIND && !WITH_COVERAGE +#if !WITH_COVERAGE /// Profilers cannot work reliably with any other libunwind or without PHDR cache. if (hasPHDRCache()) { @@ -1650,10 +1650,6 @@ try /// Describe multiple reasons when query profiler cannot work. -#if !USE_UNWIND - LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they cannot work without bundled unwind (stack unwinding) library."); -#endif - #if WITH_COVERAGE LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they work extremely slow with test coverage."); #endif diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 313d4b77739d..dc9f36105131 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -91,7 +91,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } -#if USE_UNWIND +#ifndef __APPLE__ Timer::Timer() : log(&Poco::Logger::get("Timer")) {} @@ -209,13 +209,13 @@ QueryProfilerBase::QueryProfilerBase(UInt64 thread_id, int clock_t UNUSED(pause_signal); throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers"); -#elif !USE_UNWIND +#elif defined(__APPLE__) UNUSED(thread_id); UNUSED(clock_type); UNUSED(period); UNUSED(pause_signal); - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work with stock libunwind"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work on OSX"); #else /// Sanity check. if (!hasPHDRCache()) @@ -264,7 +264,7 @@ QueryProfilerBase::~QueryProfilerBase() template void QueryProfilerBase::cleanup() { -#if USE_UNWIND +#ifndef __APPLE__ timer.stop(); signal_handler_disarmed = true; #endif diff --git a/src/Common/QueryProfiler.h b/src/Common/QueryProfiler.h index 6a9ed10e315c..87432a4b6998 100644 --- a/src/Common/QueryProfiler.h +++ b/src/Common/QueryProfiler.h @@ -28,7 +28,7 @@ namespace DB * Note that signal handler implementation is defined by template parameter. See QueryProfilerReal and QueryProfilerCPU. */ -#if USE_UNWIND +#ifndef __APPLE__ class Timer { public: @@ -60,7 +60,7 @@ class QueryProfilerBase Poco::Logger * log; -#if USE_UNWIND +#ifndef __APPLE__ inline static thread_local Timer timer = Timer(); #endif diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index aea0f854fe14..c13b63854e46 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -20,13 +20,10 @@ #include #include #include +#include #include "config.h" -#if USE_UNWIND -# include -#endif - namespace { /// Currently this variable is set up once on server startup. 
@@ -287,12 +284,8 @@ StackTrace::StackTrace(const ucontext_t & signal_context) void StackTrace::tryCapture() { -#if USE_UNWIND size = unw_backtrace(frame_pointers.data(), capacity); __msan_unpoison(frame_pointers.data(), size * sizeof(frame_pointers[0])); -#else - size = 0; -#endif } /// ClickHouse uses bundled libc++ so type names will be the same on every system thus it's safe to hardcode them diff --git a/src/Common/config.h.in b/src/Common/config.h.in index 71b4e098c8fb..1cb13d3ae3ec 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -9,7 +9,6 @@ #cmakedefine01 USE_AWS_S3 #cmakedefine01 USE_AZURE_BLOB_STORAGE #cmakedefine01 USE_BROTLI -#cmakedefine01 USE_UNWIND #cmakedefine01 USE_CASSANDRA #cmakedefine01 USE_SENTRY #cmakedefine01 USE_GRPC diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in b/src/Storages/System/StorageSystemBuildOptions.cpp.in index c2d35c96ce5c..4e7a25d7726b 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -23,7 +23,6 @@ const char * auto_config_build[] "USE_EMBEDDED_COMPILER", "@USE_EMBEDDED_COMPILER@", "USE_GLIBC_COMPATIBILITY", "@GLIBC_COMPATIBILITY@", "USE_JEMALLOC", "@ENABLE_JEMALLOC@", - "USE_UNWIND", "@USE_UNWIND@", "USE_ICU", "@USE_ICU@", "USE_H3", "@USE_H3@", "USE_MYSQL", "@USE_MYSQL@", From 45d36b736a8d6b207fb9cf88f8f0ba8f2a7e0ce6 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 8 Jul 2023 23:14:02 +0000 Subject: [PATCH 171/179] Update version_date.tsv and changelogs after v23.6.2.18-stable --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v23.6.2.18-stable.md | 25 +++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 2 ++ 5 files changed, 30 insertions(+), 3 deletions(-) create mode 100644 docs/changelogs/v23.6.2.18-stable.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index f13fcdc14d6f..8a6324aef88a 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ esac ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release" -ARG VERSION="23.6.1.1524" +ARG VERSION="23.6.2.18" ARG PACKAGES="clickhouse-keeper" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 5e5be3f6d73f..7f4536276010 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.6.1.1524" +ARG VERSION="23.6.2.18" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. 
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 8693193455f7..1fa7b83ae161 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.6.1.1524" +ARG VERSION="23.6.2.18" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v23.6.2.18-stable.md b/docs/changelogs/v23.6.2.18-stable.md new file mode 100644 index 000000000000..1f872a190ba7 --- /dev/null +++ b/docs/changelogs/v23.6.2.18-stable.md @@ -0,0 +1,25 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.6.2.18-stable (89f39a7ccfe) FIXME as compared to v23.6.1.1524-stable (d1c7e13d088) + +#### Build/Testing/Packaging Improvement +* Backported in [#51888](https://github.com/ClickHouse/ClickHouse/issues/51888): Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)). +* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Remove the usage of Analyzer setting in the client [#51578](https://github.com/ClickHouse/ClickHouse/pull/51578) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix 02116_tuple_element with Analyzer [#51669](https://github.com/ClickHouse/ClickHouse/pull/51669) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix SQLLogic docker images [#51719](https://github.com/ClickHouse/ClickHouse/pull/51719) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Pin for docker-ce [#51743](https://github.com/ClickHouse/ClickHouse/pull/51743) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 2a098d8c1da6..dd46f6103d07 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v23.6.2.18-stable 2023-07-09 v23.6.1.1524-stable 2023-06-30 v23.5.4.25-stable 2023-06-29 v23.5.3.24-stable 2023-06-17 @@ -55,6 +56,7 @@ v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.20.11-lts 2023-07-09 v22.8.19.10-lts 2023-06-17 v22.8.18.31-lts 2023-06-12 v22.8.17.17-lts 2023-04-22 From c968fe808fc1b7693e53bb3d4f9adc03f41c7066 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 8 Jul 2023 23:17:41 +0000 Subject: [PATCH 172/179] Update version_date.tsv and changelogs after v22.8.20.11-lts --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v22.8.20.11-lts.md | 20 ++++++++++++++++++++ utils/list-versions/version_date.tsv | 2 ++ 5 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 docs/changelogs/v22.8.20.11-lts.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index f13fcdc14d6f..8a6324aef88a 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ esac ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release" -ARG VERSION="23.6.1.1524" +ARG VERSION="23.6.2.18" ARG PACKAGES="clickhouse-keeper" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 5e5be3f6d73f..7f4536276010 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.6.1.1524" +ARG VERSION="23.6.2.18" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 8693193455f7..1fa7b83ae161 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.6.1.1524" +ARG VERSION="23.6.2.18" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docs/changelogs/v22.8.20.11-lts.md b/docs/changelogs/v22.8.20.11-lts.md new file mode 100644 index 000000000000..bd45ce9319a5 --- /dev/null +++ b/docs/changelogs/v22.8.20.11-lts.md @@ -0,0 +1,20 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v22.8.20.11-lts (c9ca79e24e8) FIXME as compared to v22.8.19.10-lts (989bc2fe8b0) + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)). 
+* Fix incorrect constant folding [#50536](https://github.com/ClickHouse/ClickHouse/pull/50536) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix fuzzer failure in ActionsDAG [#51301](https://github.com/ClickHouse/ClickHouse/pull/51301) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 2a098d8c1da6..dd46f6103d07 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v23.6.2.18-stable 2023-07-09 v23.6.1.1524-stable 2023-06-30 v23.5.4.25-stable 2023-06-29 v23.5.3.24-stable 2023-06-17 @@ -55,6 +56,7 @@ v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.20.11-lts 2023-07-09 v22.8.19.10-lts 2023-06-17 v22.8.18.31-lts 2023-06-12 v22.8.17.17-lts 2023-04-22 From 8d9e1d41c5d0dc8220b97c68ebe6a21c10042b2a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 9 Jul 2023 03:39:05 +0200 Subject: [PATCH 173/179] Move a test to the right place --- .../00178_query_datetime64_index.reference | 0 .../{1_stateful => 0_stateless}/00178_query_datetime64_index.sql | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/{1_stateful => 0_stateless}/00178_query_datetime64_index.reference (100%) rename tests/queries/{1_stateful => 0_stateless}/00178_query_datetime64_index.sql (100%) diff --git a/tests/queries/1_stateful/00178_query_datetime64_index.reference b/tests/queries/0_stateless/00178_query_datetime64_index.reference similarity index 100% rename from tests/queries/1_stateful/00178_query_datetime64_index.reference rename to tests/queries/0_stateless/00178_query_datetime64_index.reference diff --git a/tests/queries/1_stateful/00178_query_datetime64_index.sql b/tests/queries/0_stateless/00178_query_datetime64_index.sql similarity index 100% rename from tests/queries/1_stateful/00178_query_datetime64_index.sql rename to tests/queries/0_stateless/00178_query_datetime64_index.sql From 29f625e7bb9f8553b3e42850a9bcf9e8411a700e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 9 Jul 2023 03:43:18 +0200 Subject: [PATCH 174/179] Add a check to validate that the stateful tests are stateful --- utils/check-style/check-style | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index e7c06fefee21..0b3b86b4772e 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -407,3 +407,6 @@ find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep # If a user is doing dynamic or typeid cast with a pointer, and immediately dereferencing it, it is unsafe. find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep --line-number -P '(dynamic|typeid)_cast<[^>]+\*>\([^\(\)]+\)->' | grep -P '.' && echo "It's suspicious when you are doing a dynamic_cast or typeid_cast with a pointer and immediately dereferencing it. Use references instead of pointers or check a pointer to nullptr." 
+ +# The stateful directory should only contain the tests that depend on the test dataset (hits or visits). +find $ROOT_PATH/tests/queries/1_stateful -name '*.sql' -or -name '*.sh' | grep -v '00076_system_columns_bytes' | xargs -I{} bash -c 'grep -q -P "hits|visits" "{}" || echo "The test {} does not depend on the test dataset (hits or visits table) and should be located in the 0_stateless directory. You can also add an exception to the check-style script."' From 52632af9b38e052050e6e33d2b10614a376461be Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 9 Jul 2023 06:18:10 +0300 Subject: [PATCH 175/179] Update connection.py --- tests/sqllogic/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/sqllogic/connection.py b/tests/sqllogic/connection.py index d71cc005d093..ca03839fc35e 100644 --- a/tests/sqllogic/connection.py +++ b/tests/sqllogic/connection.py @@ -62,7 +62,7 @@ def default_clickhouse_odbc_conn_str(): return str( OdbcConnectingArgs.create_from_kw( dsn="ClickHouse DSN (ANSI)", - Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree", + Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree&union_default_mode=DISTINCT&group_by_use_nulls=1&join_use_nulls=1", ) ) From d52041345401bdd1a02c2482546da2d5c21793cb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 9 Jul 2023 09:20:03 +0200 Subject: [PATCH 176/179] Cleanup SymbolIndex after reload got removed Remove MultiVersion for SymbolIndex structure since after #51873 it is useless. Follow-up for: #51873 Signed-off-by: Azat Khuzhin --- src/Common/StackTrace.cpp | 6 ++---- src/Common/SymbolIndex.cpp | 11 +++-------- src/Common/SymbolIndex.h | 9 +++------ src/Common/examples/symbol_index.cpp | 3 +-- src/Common/getResource.cpp | 2 +- src/Daemon/BaseDaemon.cpp | 2 +- src/Daemon/SentryWriter.cpp | 2 +- src/Functions/addressToLine.h | 3 +-- src/Functions/addressToSymbol.cpp | 3 +-- src/Functions/serverConstants.cpp | 2 +- src/Interpreters/CrashLog.cpp | 2 +- 11 files changed, 16 insertions(+), 29 deletions(-) diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index c13b63854e46..b323f1e4363e 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -208,8 +208,7 @@ void StackTrace::symbolize( const StackTrace::FramePointers & frame_pointers, [[maybe_unused]] size_t offset, size_t size, StackTrace::Frames & frames) { #if defined(__ELF__) && !defined(OS_FREEBSD) - auto symbol_index_ptr = DB::SymbolIndex::instance(); - const DB::SymbolIndex & symbol_index = *symbol_index_ptr; + const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance(); std::unordered_map dwarfs; for (size_t i = 0; i < offset; ++i) @@ -341,8 +340,7 @@ toStringEveryLineImpl([[maybe_unused]] bool fatal, const StackTraceRefTriple & s using enum DB::Dwarf::LocationInfoMode; const auto mode = fatal ? 
FULL_WITH_INLINE : FAST; - auto symbol_index_ptr = DB::SymbolIndex::instance(); - const DB::SymbolIndex & symbol_index = *symbol_index_ptr; + const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance(); std::unordered_map dwarfs; for (size_t i = stack_trace.offset; i < stack_trace.size; ++i) diff --git a/src/Common/SymbolIndex.cpp b/src/Common/SymbolIndex.cpp index 4c7f3827125c..cb02bb3ff75e 100644 --- a/src/Common/SymbolIndex.cpp +++ b/src/Common/SymbolIndex.cpp @@ -509,7 +509,7 @@ const T * find(const void * address, const std::vector & vec) } -void SymbolIndex::update() +void SymbolIndex::load() { dl_iterate_phdr(collectSymbols, &data); @@ -549,17 +549,12 @@ String SymbolIndex::getBuildIDHex() const return build_id_hex; } -MultiVersion & SymbolIndex::instanceImpl() +const SymbolIndex & SymbolIndex::instance() { - static MultiVersion instance(std::unique_ptr(new SymbolIndex)); + static SymbolIndex instance; return instance; } -MultiVersion::Version SymbolIndex::instance() -{ - return instanceImpl().get(); -} - } #endif diff --git a/src/Common/SymbolIndex.h b/src/Common/SymbolIndex.h index 773f59b7914a..4fd108434d50 100644 --- a/src/Common/SymbolIndex.h +++ b/src/Common/SymbolIndex.h @@ -8,8 +8,6 @@ #include #include -#include - namespace DB { @@ -20,10 +18,10 @@ namespace DB class SymbolIndex : private boost::noncopyable { protected: - SymbolIndex() { update(); } + SymbolIndex() { load(); } public: - static MultiVersion::Version instance(); + static const SymbolIndex & instance(); struct Symbol { @@ -89,8 +87,7 @@ class SymbolIndex : private boost::noncopyable private: Data data; - void update(); - static MultiVersion & instanceImpl(); + void load(); }; } diff --git a/src/Common/examples/symbol_index.cpp b/src/Common/examples/symbol_index.cpp index 13a49fd65ad3..ca9c26f27d66 100644 --- a/src/Common/examples/symbol_index.cpp +++ b/src/Common/examples/symbol_index.cpp @@ -22,8 +22,7 @@ int main(int argc, char ** argv) return 1; } - auto symbol_index_ptr = SymbolIndex::instance(); - const SymbolIndex & symbol_index = *symbol_index_ptr; + const SymbolIndex & symbol_index = SymbolIndex::instance(); for (const auto & elem : symbol_index.symbols()) std::cout << elem.name << ": " << elem.address_begin << " ... " << elem.address_end << "\n"; diff --git a/src/Common/getResource.cpp b/src/Common/getResource.cpp index fe603fcc5502..72ba24c2f44f 100644 --- a/src/Common/getResource.cpp +++ b/src/Common/getResource.cpp @@ -16,7 +16,7 @@ std::string_view getResource(std::string_view name) #if defined USE_MUSL /// If static linking is used, we cannot use dlsym and have to parse ELF symbol table by ourself. 
- return DB::SymbolIndex::instance()->getResource(name_replaced); + return DB::SymbolIndex::instance().getResource(name_replaced); #else // In most `dlsym(3)` APIs, one passes the symbol name as it appears via diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index bf6c3b4cdcfb..319d2bc8b5ba 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -986,7 +986,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() signal_listener_thread.start(*signal_listener); #if defined(__ELF__) && !defined(OS_FREEBSD) - String build_id_hex = SymbolIndex::instance()->getBuildIDHex(); + String build_id_hex = SymbolIndex::instance().getBuildIDHex(); if (build_id_hex.empty()) build_id = ""; else diff --git a/src/Daemon/SentryWriter.cpp b/src/Daemon/SentryWriter.cpp index 041d3292841a..e38d339d0885 100644 --- a/src/Daemon/SentryWriter.cpp +++ b/src/Daemon/SentryWriter.cpp @@ -150,7 +150,7 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta sentry_set_extra("signal_number", sentry_value_new_int32(sig)); #if defined(__ELF__) && !defined(OS_FREEBSD) - const String & build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex(); + const String & build_id_hex = DB::SymbolIndex::instance().getBuildIDHex(); sentry_set_tag("build_id", build_id_hex.c_str()); #endif diff --git a/src/Functions/addressToLine.h b/src/Functions/addressToLine.h index 1410e55d9a9d..5c1611fe173d 100644 --- a/src/Functions/addressToLine.h +++ b/src/Functions/addressToLine.h @@ -90,8 +90,7 @@ class FunctionAddressToLineBase : public IFunction ResultT impl(uintptr_t addr) const { - auto symbol_index_ptr = SymbolIndex::instance(); - const SymbolIndex & symbol_index = *symbol_index_ptr; + const SymbolIndex & symbol_index = SymbolIndex::instance(); if (const auto * object = symbol_index.findObject(reinterpret_cast(addr))) { diff --git a/src/Functions/addressToSymbol.cpp b/src/Functions/addressToSymbol.cpp index 95d57f6d296c..cc5ad4c4fdf7 100644 --- a/src/Functions/addressToSymbol.cpp +++ b/src/Functions/addressToSymbol.cpp @@ -68,8 +68,7 @@ class FunctionAddressToSymbol : public IFunction ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { - auto symbol_index_ptr = SymbolIndex::instance(); - const SymbolIndex & symbol_index = *symbol_index_ptr; + const SymbolIndex & symbol_index = SymbolIndex::instance(); const ColumnPtr & column = arguments[0].column; const ColumnUInt64 * column_concrete = checkAndGetColumn(column.get()); diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index 0fda53414dee..4294f97d7718 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -27,7 +27,7 @@ namespace public: static constexpr auto name = "buildId"; static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } - explicit FunctionBuildId(ContextPtr context) : FunctionConstantBase(SymbolIndex::instance()->getBuildIDHex(), context->isDistributed()) {} + explicit FunctionBuildId(ContextPtr context) : FunctionConstantBase(SymbolIndex::instance().getBuildIDHex(), context->isDistributed()) {} }; #endif diff --git a/src/Interpreters/CrashLog.cpp b/src/Interpreters/CrashLog.cpp index f1f0ffb6f600..08c08ffecd10 100644 --- a/src/Interpreters/CrashLog.cpp +++ b/src/Interpreters/CrashLog.cpp @@ -52,7 +52,7 @@ void CrashLogElement::appendToBlock(MutableColumns & columns) const String build_id_hex; #if defined(__ELF__) && !defined(OS_FREEBSD) - 
build_id_hex = SymbolIndex::instance()->getBuildIDHex(); + build_id_hex = SymbolIndex::instance().getBuildIDHex(); #endif columns[i++]->insert(build_id_hex); } From 3b954a2952477bee203a5e00c2cbb9f6a50ae274 Mon Sep 17 00:00:00 2001 From: Konstantin Ilchenko Date: Sun, 9 Jul 2023 14:38:16 +0200 Subject: [PATCH 177/179] [DOCS] Add REMOVE SAMPLE BY to docs --- .../statements/alter/sample-by.md | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/statements/alter/sample-by.md b/docs/en/sql-reference/statements/alter/sample-by.md index b20f3c7b5d33..ccad792f8535 100644 --- a/docs/en/sql-reference/statements/alter/sample-by.md +++ b/docs/en/sql-reference/statements/alter/sample-by.md @@ -5,15 +5,28 @@ sidebar_label: SAMPLE BY title: "Manipulating Sampling-Key Expressions" --- -Syntax: +# Manipulating SAMPLE BY expression + +The following operations are available: + +## MODIFY ``` sql ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY SAMPLE BY new_expression ``` -The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions). +The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions). The primary key must contain the new sample key. + +## REMOVE + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] REMOVE SAMPLE BY +``` + +The command removes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table. + -The command is lightweight in the sense that it only changes metadata. The primary key must contain the new sample key. +The commands `MODIFY` and `REMOVE` are lightweight in the sense that they only change metadata or remove files. :::note It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). From 2db092f9d82537e7bac4f31568a0d1c21dbc5799 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 8 Jul 2023 21:06:00 +0200 Subject: [PATCH 178/179] Cleanup remote_servers in dist config.xml At first, there were not so many clusters in the dist config; they were added whenever someone needed a new cluster for tests. So let's move them to the clusters.xml that is deployed only for tests, and leave only the default cluster. Also clean up some configs that had been copied from the dist config in the repo (as for the test_config_* integration tests, this should be OK, since there is more_clusters.xml as well, which covers additional cases).
Signed-off-by: Azat Khuzhin --- .../internal/platform/data/file_test.go | 4 +- .../testdata/configs/xml/config.xml | 67 -------- .../testdata/configs/yaml/config.yaml | 40 ----- .../testdata/configs/yandex_xml/config.xml | 67 -------- programs/server/config.d/more_clusters.xml | 49 ------ programs/server/config.xml | 155 +---------------- programs/server/config.yaml.example | 44 +---- tests/config/config.d/clusters.xml | 157 ++++++++++++++++++ .../configs/config.xml | 38 ----- .../test_config_xml_full/configs/config.xml | 85 ---------- .../test_config_xml_main/configs/config.xml | 67 -------- .../configs/config.xml | 67 -------- .../test_config_yaml_full/configs/config.yaml | 46 ----- .../test_config_yaml_main/configs/config.yaml | 40 ----- .../configs/disable_lazy_load.xml | 12 +- .../configs/overrides.xml | 12 ++ .../test_dictionaries_dependency/test.py | 8 +- ...torage_configuration.xml => overrides.xml} | 17 ++ .../test.py | 2 +- .../configs/macros.xml | 1 - .../test_https_replication/configs/config.xml | 25 --- .../{named_collections.xml => overrides.xml} | 12 ++ .../test_mask_sensitive_info/test.py | 2 +- .../configs/config.d/remote_servers.xml | 14 ++ .../test_storage_hdfs/configs/cluster.xml | 15 ++ .../test_storage_url/configs/conf.xml | 34 ++++ utils/clickhouse-diagnostics/README.md | 75 --------- 27 files changed, 289 insertions(+), 866 deletions(-) delete mode 100644 programs/server/config.d/more_clusters.xml create mode 100644 tests/integration/test_dictionaries_dependency/configs/overrides.xml rename tests/integration/test_distributed_storage_configuration/configs/config.d/{storage_configuration.xml => overrides.xml} (54%) rename tests/integration/test_mask_sensitive_info/configs/{named_collections.xml => overrides.xml} (65%) diff --git a/programs/diagnostics/internal/platform/data/file_test.go b/programs/diagnostics/internal/platform/data/file_test.go index 938c34281f14..5df1f8cc359a 100644 --- a/programs/diagnostics/internal/platform/data/file_test.go +++ b/programs/diagnostics/internal/platform/data/file_test.go @@ -135,7 +135,7 @@ func TestConfigFileFrameCopy(t *testing.T) { sizes := map[string]int64{ "users.xml": int64(2017), "default-password.xml": int64(188), - "config.xml": int64(61662), + "config.xml": int64(59506), "server-include.xml": int64(168), "user-include.xml": int64(559), } @@ -189,7 +189,7 @@ func TestConfigFileFrameCopy(t *testing.T) { sizes := map[string]int64{ "users.yaml": int64(1023), "default-password.yaml": int64(132), - "config.yaml": int64(42512), + "config.yaml": int64(41633), "server-include.yaml": int64(21), "user-include.yaml": int64(120), } diff --git a/programs/diagnostics/testdata/configs/xml/config.xml b/programs/diagnostics/testdata/configs/xml/config.xml index 21a0821f89de..c08b0b2970f6 100644 --- a/programs/diagnostics/testdata/configs/xml/config.xml +++ b/programs/diagnostics/testdata/configs/xml/config.xml @@ -649,73 +649,6 @@ - - - - localhost - 9000 - - - - - localhost - 9000 - - - - - - - 127.0.0.1 - 9000 - - - - - 127.0.0.2 - 9000 - - - - - - true - - 127.0.0.1 - 9000 - - - - true - - 127.0.0.2 - 9000 - - - - - - - localhost - 9440 - 1 - - - - - - - localhost - 9000 - - - - - localhost - 1 - - - - + + + - - - - false - - 127.0.0.1 - 9000 - - - 127.0.0.2 - 9000 - - - 127.0.0.3 - 9000 - - - - - - - false - - 127.0.0.1 - 9000 - - - 127.0.0.2 - 9000 - - - 127.0.0.3 - 9000 - - - 127.0.0.4 - 9000 - - - 127.0.0.5 - 9000 - - - 127.0.0.6 - 9000 - - - 127.0.0.7 - 9000 - - - 127.0.0.8 - 9000 - - - 127.0.0.9 - 9000 - - - 127.0.0.10 - 9000 - - 
- - 127.0.0.11 - 1234 - - - - - - - localhost - 9000 - - - - - localhost - 9000 - - - - - - - 127.0.0.1 - 9000 - - - - - 127.0.0.2 - 9000 - - - - - - true - - 127.0.0.1 - 9000 - - - - true - - 127.0.0.2 - 9000 - - - - - - - localhost - 9440 - 1 - - - - - - - localhost - 9000 - - - - - localhost - 1 - - - + + + 127.0.0.11 + 1234 + + + + + + false + + 127.0.0.1 + 9000 + + + 127.0.0.2 + 9000 + + + 127.0.0.3 + 9000 + + + + + + + + localhost + 9000 + + + + + localhost + 9000 + + + + + + true + + 127.0.0.1 + 9000 + + + + true + + 127.0.0.2 + 9000 + + + + + + + localhost + 9440 + 1 + + + + + + + localhost + 9000 + + + + + localhost + 1 + + + diff --git a/tests/integration/test_config_corresponding_root/configs/config.xml b/tests/integration/test_config_corresponding_root/configs/config.xml index 72014646161c..9a38d02a0369 100644 --- a/tests/integration/test_config_corresponding_root/configs/config.xml +++ b/tests/integration/test_config_corresponding_root/configs/config.xml @@ -136,7 +136,6 @@ https://clickhouse.com/docs/en/table_engines/distributed/ --> - @@ -145,43 +144,6 @@ - - - - localhost - 9000 - - - - - localhost - 9000 - - - - - - - localhost - 9440 - 1 - - - - - - - localhost - 9000 - - - - - localhost - 1 - - - diff --git a/tests/integration/test_config_xml_full/configs/config.xml b/tests/integration/test_config_xml_full/configs/config.xml index 4e3d1def5fcf..d142df18af80 100644 --- a/tests/integration/test_config_xml_full/configs/config.xml +++ b/tests/integration/test_config_xml_full/configs/config.xml @@ -565,91 +565,6 @@ - - - - localhost - 9000 - - - - - localhost - 9000 - - - - - - - 127.0.0.1 - 9000 - - - - - 127.0.0.2 - 9000 - - - - - - true - - 127.0.0.1 - 9000 - - - - true - - 127.0.0.2 - 9000 - - - - - - - localhost - 9440 - 1 - - - - - - - localhost - 9440 - - - - - - - - localhost - 9440 - - - - - - - - localhost - 9000 - - - - - localhost - 1 - - - - - - - - - - localhost - 9000 - - - - - - - localhost - 9440 - 1 - - - - - -