Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: GCS endpoint should be independently configurable from credentials #837

Merged
merged 3 commits into from
Feb 20, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions ReadMe.md
Original file line number Diff line number Diff line change
Expand Up @@ -529,6 +529,7 @@ gcs:
credentials_file: "" # GCS_CREDENTIALS_FILE
credentials_json: "" # GCS_CREDENTIALS_JSON
credentials_json_encoded: "" # GCS_CREDENTIALS_JSON_ENCODED
endpoint: "" # GCS_ENDPOINT, use it for custom GCS endpoint/compatible storage. For example, when using custom endpoint via private service connect
bucket: "" # GCS_BUCKET
path: "" # GCS_PATH, `system.macros` values can be applied as {macro_name}
object_disk_path: "" # GCS_OBJECT_DISK_PATH, path for backup of part from `s3` object disk (clickhouse support only gcs over s3 protocol), if disk present, then shall not be zero and shall not be prefixed by `path`
Expand Down
7 changes: 5 additions & 2 deletions pkg/storage/gcs.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,15 +90,18 @@ func (gcs *GCS) Connect(ctx context.Context) error {

if gcs.Config.Endpoint != "" {
endpoint = gcs.Config.Endpoint
clientOptions = append([]option.ClientOption{option.WithoutAuthentication()}, clientOptions...)
clientOptions = append(clientOptions, option.WithEndpoint(endpoint))
} else if gcs.Config.CredentialsJSON != "" {
}

if gcs.Config.CredentialsJSON != "" {
clientOptions = append(clientOptions, option.WithCredentialsJSON([]byte(gcs.Config.CredentialsJSON)))
} else if gcs.Config.CredentialsJSONEncoded != "" {
d, _ := base64.StdEncoding.DecodeString(gcs.Config.CredentialsJSONEncoded)
clientOptions = append(clientOptions, option.WithCredentialsJSON(d))
} else if gcs.Config.CredentialsFile != "" {
clientOptions = append(clientOptions, option.WithCredentialsFile(gcs.Config.CredentialsFile))
} else {
clientOptions = append(clientOptions, option.WithoutAuthentication())
}

if gcs.Config.ForceHttp {
Expand Down
29 changes: 29 additions & 0 deletions test/integration/config-gcs-custom-endpoint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
general:
disable_progress_bar: true
remote_storage: gcs
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- "system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
- "_temporary_and_external_tables.*"
restore_schema_on_cluster: "{cluster}"
clickhouse:
host: clickhouse
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 5s
restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; exec:ls -la /var/lib/clickhouse/access; sql:SYSTEM SHUTDOWN"
# restart_command: bash -c 'echo "FAKE RESTART"'
backup_mutations: true
gcs:
bucket: altinity-qa-test
path: backup/{cluster}/{shard}
object_disk_path: object_disks/{cluster}/{shard}
compression_format: tar
endpoint: http://gcs:8080/storage/v1/
31 changes: 14 additions & 17 deletions test/integration/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,17 +43,19 @@ services:
- clickhouse-backup

# todo need to reproduce download after upload
# gcs:
# image: fsouza/fake-gcs-server:latest
# hostname: gcs
# entrypoint:
# - /bin/sh
# command:
# - -c
# - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
# networks:
# - clickhouse-backup

gcs:
image: fsouza/fake-gcs-server:latest
hostname: gcs
container_name: gcs
entrypoint:
- /bin/sh
command:
- -c
- "mkdir -p /data/altinity-qa-test && mkdir -p /data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080"
networks:
- clickhouse-backup
environment:
QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}"

azure:
image: mcr.microsoft.com/azure-storage/azurite:latest
Expand Down Expand Up @@ -126,9 +128,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
Expand Down Expand Up @@ -166,9 +165,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
Expand Down Expand Up @@ -196,6 +192,7 @@ services:
- ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml
- ./config-ftp-old.yaml:/etc/clickhouse-backup/config-ftp-old.yaml
- ./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml
- ./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml
- ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml
- ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml
- ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template
Expand Down
31 changes: 14 additions & 17 deletions test/integration/docker-compose_advanced.yml
Original file line number Diff line number Diff line change
Expand Up @@ -57,17 +57,19 @@ services:
- clickhouse-backup

# todo need to reproduce download after upload
# gcs:
# image: fsouza/fake-gcs-server:latest
# hostname: gcs
# entrypoint:
# - /bin/sh
# command:
# - -c
# - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
# networks:
# - clickhouse-backup

gcs:
image: fsouza/fake-gcs-server:latest
hostname: gcs
container_name: gcs
entrypoint:
- /bin/sh
command:
- -c
- "mkdir -p /data/altinity-qa-test && mkdir -p /data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080"
networks:
- clickhouse-backup
environment:
QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}"

azure:
image: mcr.microsoft.com/azure-storage/azurite:latest
Expand Down Expand Up @@ -177,9 +179,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
Expand Down Expand Up @@ -217,9 +216,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
Expand Down Expand Up @@ -254,6 +250,7 @@ services:
- ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml
- ./config-ftp-old.yaml:/etc/clickhouse-backup/config-ftp-old.yaml
- ./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml
- ./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml
- ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml
- ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml
- ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template
Expand Down
Empty file modified test/integration/dynamic_settings.sh
100644 → 100755
Empty file.
15 changes: 11 additions & 4 deletions test/integration/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1720,6 +1720,15 @@ func TestIntegrationGCS(t *testing.T) {
runMainIntegrationScenario(t, "GCS", "config-gcs.yml")
}

func TestIntegrationGCSWithCustomEndpoint(t *testing.T) {
if isTestShouldSkip("GCS_TESTS") {
t.Skip("Skipping GCS integration tests...")
return
}
//t.Parallel()
runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml")
}

func TestIntegrationSFTPAuthPassword(t *testing.T) {
//t.Parallel()
runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml")
Expand Down Expand Up @@ -2110,7 +2119,6 @@ func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorag
}
if remoteStorageType == "SFTP" {
checkRemoteDir("total 0", "sshd", "bash", "-c", "ls -lh /root/")

}
if remoteStorageType == "FTP" {
if strings.Contains(os.Getenv("COMPOSE_FILE"), "advanced") {
Expand All @@ -2119,9 +2127,8 @@ func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorag
checkRemoteDir("total 0", "ftp", "bash", "-c", "ls -lh /home/vsftpd/test_backup/backup/")
}
}
//todo check gcs backup is empty
if remoteStorageType == "GCS" {

if remoteStorageType == "GCS_EMULATOR" {
checkRemoteDir("total 0", "gcs", "sh", "-c", "ls -lh /data/altinity-qa-test/")
}
}

Expand Down
Loading