fix upload command for S3 when object lock policy turned on, fix #829
Slach committed Feb 12, 2024
1 parent daaef48 commit bacb270
Showing 2 changed files with 15 additions and 11 deletions.
1 change: 1 addition & 0 deletions ChangeLog.md
@@ -2,6 +2,7 @@
 BUG FIXES
 - fix `download` command corner cases for increment backup for tables with projections, fix [830](https://github.com/Altinity/clickhouse-backup/issues/830)
 - more informative error during try to `restore` not exists local backup
+- fix `upload` command for S3 when object lock policy turned on, fix [829](https://github.com/Altinity/clickhouse-backup/issues/829)
 
 # v2.4.29
 IMPROVEMENTS
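Background on the fix: S3 rejects writes to a bucket with an Object Lock policy unless the request carries an integrity header (`Content-MD5` or one of the `x-amz-checksum-*` headers), which is why the diff below sets `ChecksumAlgorithm` on the put, copy, and multipart-upload inputs. A minimal sketch of the same idea with aws-sdk-go-v2, assuming hypothetical bucket and key names; declaring the algorithm lets the SDK compute and attach the CRC32 checksum for the body automatically:

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Declaring ChecksumAlgorithm makes the SDK compute the CRC32 of the body
	// and send it as an x-amz-checksum-crc32 header, which satisfies the
	// integrity requirement of Object Lock-enabled buckets.
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String("my-locked-bucket"),    // hypothetical bucket
		Key:               aws.String("backups/example.tar"), // hypothetical key
		Body:              strings.NewReader("example payload"),
		ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```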
25 changes: 14 additions & 11 deletions pkg/storage/s3.go
@@ -267,10 +267,11 @@ func (s *S3) GetFileReaderWithLocalPath(ctx context.Context, key, localPath stri
 
 func (s *S3) PutFile(ctx context.Context, key string, r io.ReadCloser) error {
 	params := s3.PutObjectInput{
-		Bucket:       aws.String(s.Config.Bucket),
-		Key:          aws.String(path.Join(s.Config.Path, key)),
-		Body:         r,
-		StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
+		Bucket:            aws.String(s.Config.Bucket),
+		Key:               aws.String(path.Join(s.Config.Path, key)),
+		Body:              r,
+		StorageClass:      s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
+		ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32,
 	}
 	// ACL shall be optional, fix https://github.com/Altinity/clickhouse-backup/issues/785
 	if s.Config.ACL != "" {
@@ -456,10 +457,11 @@ func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, d
 	// just copy object without multipart
 	if srcSize < 5*1024*1024*1024 || strings.Contains(s.Config.Endpoint, "storage.googleapis.com") {
 		params := &s3.CopyObjectInput{
-			Bucket:       aws.String(s.Config.Bucket),
-			Key:          aws.String(dstKey),
-			CopySource:   aws.String(path.Join(srcBucket, srcKey)),
-			StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
+			Bucket:            aws.String(s.Config.Bucket),
+			Key:               aws.String(dstKey),
+			CopySource:        aws.String(path.Join(srcBucket, srcKey)),
+			StorageClass:      s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
+			ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32,
 		}
 		s.enrichCopyObjectParams(params)
 		_, err := s.client.CopyObject(ctx, params)
@@ -470,9 +472,10 @@ func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, d
 	}
 	// Initiate a multipart upload
 	createMultipartUploadParams := &s3.CreateMultipartUploadInput{
-		Bucket:       aws.String(s.Config.Bucket),
-		Key:          aws.String(dstKey),
-		StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
+		Bucket:            aws.String(s.Config.Bucket),
+		Key:               aws.String(dstKey),
+		StorageClass:      s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
+		ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32,
 	}
 	s.enrichCreateMultipartUploadParams(createMultipartUploadParams)
 	initResp, err := s.client.CreateMultipartUpload(ctx, createMultipartUploadParams)
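For completeness, a sketch (under the same assumptions, with hypothetical bucket and key names) of the single-request copy path carrying the checksum algorithm, mirroring the `CopyObjectInput` change above, so that server-side copies into an Object Lock bucket pass the same integrity check:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyWithChecksum is a sketch, not the project's code: a single-request
// server-side copy that asks S3 to produce a CRC32 checksum for the new
// object. Bucket and key names are hypothetical.
func copyWithChecksum(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:            aws.String("dst-locked-bucket"),               // hypothetical destination bucket
		Key:               aws.String("backups/copied.tar"),              // hypothetical destination key
		CopySource:        aws.String("src-bucket/backups/original.tar"), // "bucket/key" source, hypothetical
		StorageClass:      s3types.StorageClassStandard,
		ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32,
	})
	return err
}
```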
