Skip to content

Commit

Permalink
Merge pull request #59897 from ClickHouse/s3queue-fix-uninitialized-value
Browse files Browse the repository at this point in the history

s3queue: fix uninitialized value
  • Loading branch information
alexey-milovidov committed Feb 13, 2024
2 parents 773baca + d008ee7 commit 4dc5905
Show file tree
Hide file tree
Showing 3 changed files with 61 additions and 4 deletions.
7 changes: 7 additions & 0 deletions src/Storages/S3Queue/S3QueueTableMetadata.cpp
Expand Up @@ -69,16 +69,23 @@ void S3QueueTableMetadata::read(const String & metadata_str)
{
// Deserialize table metadata from the JSON string persisted in Keeper.
Poco::JSON::Parser parser;
auto json = parser.parse(metadata_str).extract<Poco::JSON::Object::Ptr>();

// Keys present in every metadata version: read unconditionally.
after_processing = json->getValue<String>("after_processing");
mode = json->getValue<String>("mode");
s3queue_tracked_files_limit = json->getValue<UInt64>("s3queue_tracked_files_limit");
s3queue_tracked_file_ttl_sec = json->getValue<UInt64>("s3queue_tracked_file_ttl_sec");
format_name = json->getValue<String>("format_name");
columns = json->getValue<String>("columns");

// Sharding-related keys were added in a later version; metadata written by
// older servers (e.g. 23.12 — see test_upgrade) does not contain them.
// Fall back to 1 explicitly so the members are never left uninitialized.
if (json->has("s3queue_total_shards_num"))
s3queue_total_shards_num = json->getValue<UInt64>("s3queue_total_shards_num");
else
s3queue_total_shards_num = 1;

if (json->has("s3queue_processing_threads_num"))
s3queue_processing_threads_num = json->getValue<UInt64>("s3queue_processing_threads_num");
else
s3queue_processing_threads_num = 1;
}

S3QueueTableMetadata S3QueueTableMetadata::parse(const String & metadata_str)
Expand Down
8 changes: 4 additions & 4 deletions src/Storages/S3Queue/S3QueueTableMetadata.h
Expand Up @@ -21,10 +21,10 @@ struct S3QueueTableMetadata
String columns;
String after_processing;
String mode;
/// Numeric settings carry in-class default initializers so that an instance
/// stays fully initialized even when read() encounters metadata (written by
/// an older server version) that lacks some of the keys.
UInt64 s3queue_tracked_files_limit = 0;
UInt64 s3queue_tracked_file_ttl_sec = 0;
/// Sharding settings were introduced later; 1 is the pre-sharding behavior.
UInt64 s3queue_total_shards_num = 1;
UInt64 s3queue_processing_threads_num = 1;

S3QueueTableMetadata() = default;
S3QueueTableMetadata(const StorageS3::Configuration & configuration, const S3QueueSettings & engine_settings, const StorageInMemoryMetadata & storage_metadata);
Expand Down
50 changes: 50 additions & 0 deletions tests/integration/test_storage_s3_queue/test.py
Expand Up @@ -101,6 +101,15 @@ def started_cluster():
],
stay_alive=True,
)
# Instance pinned to an older ClickHouse release (23.12) so tests can exercise
# the upgrade path: metadata written by the old binary must still be readable
# after restart_with_latest_version() (see test_upgrade).
cluster.add_instance(
    "old_instance",
    with_zookeeper=True,
    image="clickhouse/clickhouse-server",
    tag="23.12",
    stay_alive=True,
    # Run the binary installed in the image so the old version actually
    # executes; restart_with_latest_version() later swaps in the new one.
    with_installed_binary=True,
    allow_analyzer=False,
)

logging.info("Starting cluster...")
cluster.start()
Expand Down Expand Up @@ -1386,3 +1395,44 @@ def get_count():
break
time.sleep(1)
assert expected_rows == get_count()


def test_upgrade(started_cluster):
    """Verify that S3Queue metadata written by an old server (23.12) is still
    readable after upgrading the binary to the current version.

    Regression test for uninitialized S3QueueTableMetadata fields: metadata
    produced by 23.12 lacks the sharding keys, and reading it on the new
    version must fall back to defaults instead of leaving members
    uninitialized.
    """
    node = started_cluster.instances["old_instance"]

    # Plain strings: the original used f-strings with no placeholders.
    table_name = "test_upgrade"
    dst_table_name = f"{table_name}_dst"
    keeper_path = f"/clickhouse/test_{table_name}"
    files_path = f"{table_name}_data"
    files_to_generate = 10

    create_table(
        started_cluster,
        node,
        table_name,
        "ordered",
        files_path,
        additional_settings={
            "keeper_path": keeper_path,
        },
    )
    # Return value (the generated rows) is not needed: we only count rows.
    generate_random_files(
        started_cluster, files_path, files_to_generate, start_ind=0, row_num=1
    )

    create_mv(node, table_name, dst_table_name)

    def get_count():
        # Rows that made it through the materialized view into the dst table.
        return int(node.query(f"SELECT count() FROM {dst_table_name}"))

    # One row per generated file (row_num=1).
    expected_rows = files_to_generate

    # Poll until all files are processed (up to ~20 s), then assert.
    for _ in range(20):
        if expected_rows == get_count():
            break
        time.sleep(1)

    assert expected_rows == get_count()

    # Upgrade: the new binary must read the metadata written by 23.12.
    node.restart_with_latest_version()

    assert expected_rows == get_count()

0 comments on commit 4dc5905

Please sign in to comment.