tests: adjust 02980_s3_plain_DROP_TABLE tests
Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
azat committed Jan 26, 2024
1 parent 90ab986 commit 332924a
Showing 4 changed files with 60 additions and 9 deletions.
tests/queries/0_stateless/02980_s3_plain_DROP_TABLE.reference
@@ -1,4 +1,30 @@
data after INSERT 1
data after ATTACH 1
Files before DETACH TABLE
all_1_1_0

backups/ordinary_default/data/ordinary_default/data/all_1_1_0:
primary.cidx
serialization.json
metadata_version.txt
default_compression_codec.txt
data.bin
data.cmrk3
count.txt
columns.txt
checksums.txt

Files after DETACH TABLE
all_1_1_0

backups/ordinary_default/data/ordinary_default/data/all_1_1_0:
primary.cidx
serialization.json
metadata_version.txt
default_compression_codec.txt
data.bin
data.cmrk3
count.txt
columns.txt
checksums.txt

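For orientation, the nine files listed for each part are the standard layout of a compact MergeTree part: data.bin and data.cmrk3 hold the column data and compressed marks for all columns, primary.cidx is the compressed primary index, and count.txt, columns.txt, checksums.txt, default_compression_codec.txt, metadata_version.txt and serialization.json carry part metadata. As a sketch (not part of the test), a part's name, format and location can be cross-checked on a live server with:

    # illustrative: inspect the active part backing the 'data' table
    $CLICKHOUSE_CLIENT -q "
        select name, part_type, path from system.parts
        where database = currentDatabase() and table = 'data' and active
    "
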
tests/queries/0_stateless/02980_s3_plain_DROP_TABLE.sh
@@ -1,6 +1,7 @@
#!/usr/bin/env bash
-# Tags: no-fasttest
+# Tags: no-fasttest, no-random-settings, no-random-merge-tree-settings
# Tag no-fasttest: requires S3
+# Tag no-random-settings, no-random-merge-tree-settings: to avoid creating extra files like serialization.json; this test is too exotic anyway

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@@ -17,23 +18,21 @@ CLICKHOUSE_DATABASE="$new_database"

$CLICKHOUSE_CLIENT -nm -q "
drop table if exists data;
-create table data (key Int) engine=MergeTree() order by key settings disk='s3_plain_disk'; -- { serverError TABLE_IS_READ_ONLY }
create table data (key Int) engine=MergeTree() order by key;
insert into data values (1);
select 'data after INSERT', count() from data;
"

# suppress output
$CLICKHOUSE_CLIENT -q "backup table data to S3('http://localhost:11111/test/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null
$CLICKHOUSE_CLIENT -q "backup table data to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null

$CLICKHOUSE_CLIENT -nm -q "
drop table data;
attach table data (key Int) engine=MergeTree() order by key
settings
max_suspicious_broken_parts=0,
disk=disk(type=s3_plain,
-          endpoint='http://localhost:11111/test/backups/$CLICKHOUSE_DATABASE',
+          endpoint='http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE',
access_key_id='test',
secret_access_key='testtest');
select 'data after ATTACH', count() from data;
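
What the test guards, in short: BACKUP TABLE writes plain MergeTree data files to S3, and ATTACH with an inline disk(type=s3_plain, ...) definition mounts them as a read-only table without copying anything back. The reference files above compare the S3 listing before and after DETACH TABLE precisely because detaching (or dropping) a table whose data lives on a read-only s3_plain disk must leave the files untouched. A sketch of the two properties in play (illustrative, not lines from the diff):

    # s3_plain-backed tables reject writes, and detaching/dropping them must
    # leave the S3 files intact so the same disk() definition can re-attach them
    $CLICKHOUSE_CLIENT -nm -q "
    insert into data values (2); -- { serverError TABLE_IS_READ_ONLY }
    detach table data;
    "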
tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_replicated.reference
@@ -1,4 +1,30 @@
data after INSERT 1
data after ATTACH 1
Files before DETACH TABLE
all_0_0_0

backups/ordinary_default/data/ordinary_default/data_read/all_0_0_0:
primary.cidx
serialization.json
metadata_version.txt
default_compression_codec.txt
data.bin
data.cmrk3
count.txt
columns.txt
checksums.txt

Files after DETACH TABLE
all_0_0_0

backups/ordinary_default/data/ordinary_default/data_read/all_0_0_0:
primary.cidx
serialization.json
metadata_version.txt
default_compression_codec.txt
data.bin
data.cmrk3
count.txt
columns.txt
checksums.txt

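Note the part name difference between the two reference files: the plain MergeTree insert produced all_1_1_0, while the replicated table's insert produced all_0_0_0, because ReplicatedMergeTree allocates block numbers through Keeper starting from 0. A quick way to inspect a part's block numbers on a live table (a sketch, not part of the test):

    # illustrative: show the block-number range encoded in the part name
    $CLICKHOUSE_CLIENT -q "
        select name, min_block_number, max_block_number from system.parts
        where database = currentDatabase() and table = 'data_read' and active
    "
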
tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_replicated.sh
@@ -1,6 +1,7 @@
#!/usr/bin/env bash
-# Tags: no-fasttest
+# Tags: no-fasttest, no-random-settings, no-random-merge-tree-settings
# Tag no-fasttest: requires S3
+# Tag no-random-settings, no-random-merge-tree-settings: to avoid creating extra files like serialization.json; this test is too exotic anyway

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@@ -19,7 +20,6 @@ $CLICKHOUSE_CLIENT -nm -q "
drop table if exists data_write;
create table data_write (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'write') order by key;
-create table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key settings disk='s3_plain_disk'; -- { serverError TABLE_IS_READ_ONLY }
create table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key;
insert into data_write values (1);
@@ -28,15 +28,15 @@ $CLICKHOUSE_CLIENT -nm -q "
"

# suppress output
$CLICKHOUSE_CLIENT -q "backup table data_read to S3('http://localhost:11111/test/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null
$CLICKHOUSE_CLIENT -q "backup table data_read to S3('http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE', 'test', 'testtest')" > /dev/null

$CLICKHOUSE_CLIENT -nm -q "
drop table data_read;
attach table data_read (key Int) engine=ReplicatedMergeTree('/tables/{database}/data', 'read') order by key
settings
max_suspicious_broken_parts=0,
disk=disk(type=s3_plain,
-          endpoint='http://localhost:11111/test/backups/$CLICKHOUSE_DATABASE',
+          endpoint='http://localhost:11111/test/s3_plain/backups/$CLICKHOUSE_DATABASE',
access_key_id='test',
secret_access_key='testtest');
select 'data after ATTACH', count() from data_read;
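
One twist in the replicated variant: data_write and data_read share the replication path '/tables/{database}/data', so the s3_plain-mounted read replica coexists with a fully writable one. Dropping the read-only replica must not disturb the write replica or the S3 files; a hypothetical check along the lines of the (truncated) tail of the test, not actual lines from it:

    # illustrative: dropping the s3_plain replica leaves the write replica intact
    $CLICKHOUSE_CLIENT -nm -q "
    drop table data_read;
    select 'data_write still intact', count() from data_write;
    "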
