From 6e23540fe4d633a4f4f7049ca1be80defb290f0a Mon Sep 17 00:00:00 2001 From: Katharina Przybill <30441792+kathap@users.noreply.github.com> Date: Thu, 20 Nov 2025 09:38:56 +0100 Subject: [PATCH 1/4] Implement missing storage-cli operations for AliCloud Missing Operations: copy, delete-recursive, list, properties, ensure-bucket-exists. --- alioss/client/client.go | 24 +++ alioss/client/storage_client.go | 234 ++++++++++++++++++++++++- alioss/integration/general_ali_test.go | 163 +++++++++++++++++ alioss/main.go | 64 ++++++- 4 files changed, 480 insertions(+), 5 deletions(-) diff --git a/alioss/client/client.go b/alioss/client/client.go index f48d2ce..7a78fab 100644 --- a/alioss/client/client.go +++ b/alioss/client/client.go @@ -41,6 +41,11 @@ func (client *AliBlobstore) Delete(object string) error { return client.storageClient.Delete(object) } +func (client *AliBlobstore) DeleteRecursive(prefix string) error { + + return client.storageClient.DeleteRecursive(prefix) +} + func (client *AliBlobstore) Exists(object string) (bool, error) { return client.storageClient.Exists(object) } @@ -75,3 +80,22 @@ func (client *AliBlobstore) getMD5(filePath string) (string, error) { return md5, nil } + +func (client *AliBlobstore) List(prefix string) ([]string, error) { + return client.storageClient.List(prefix) +} + +func (client *AliBlobstore) Copy(srcBlob string, dstBlob string) error { + + return client.storageClient.Copy(srcBlob, dstBlob) +} + +func (client *AliBlobstore) Properties(dest string) error { + + return client.storageClient.Properties(dest) +} + +func (client *AliBlobstore) EnsureBucketExists() error { + + return client.storageClient.EnsureBucketExists() +} diff --git a/alioss/client/storage_client.go b/alioss/client/storage_client.go index 2813512..8e447c4 100644 --- a/alioss/client/storage_client.go +++ b/alioss/client/storage_client.go @@ -1,7 +1,13 @@ package client import ( + "encoding/json" + "errors" + "fmt" "log" + "strconv" + "strings" + "time" "github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/cloudfoundry/storage-cli/alioss/config" @@ -20,10 +26,19 @@ type StorageClient interface { destinationFilePath string, ) error + Copy( + srcBlob string, + destBlob string, + ) error + Delete( object string, ) error + DeleteRecursive( + dest string, + ) error + Exists( object string, ) (bool, error) @@ -37,14 +52,49 @@ type StorageClient interface { object string, expiredInSec int64, ) (string, error) -} + List( + prefix string, + ) ([]string, error) + + Properties( + dest string, + ) error + + EnsureBucketExists() error +} type DefaultStorageClient struct { storageConfig config.AliStorageConfig + client *oss.Client + bucket *oss.Bucket + bucketURL string } func NewStorageClient(storageConfig config.AliStorageConfig) (StorageClient, error) { - return DefaultStorageClient{storageConfig: storageConfig}, nil + client, err := oss.New( + storageConfig.Endpoint, + storageConfig.AccessKeyID, + storageConfig.AccessKeySecret, + ) + if err != nil { + return nil, err + } + + bucket, err := client.Bucket(storageConfig.BucketName) + if err != nil { + return nil, err + } + + endpoint := strings.TrimPrefix(storageConfig.Endpoint, "https://") + endpoint = strings.TrimPrefix(endpoint, "http://") + bucketURL := fmt.Sprintf("https://%s.%s", storageConfig.BucketName, endpoint) + + return DefaultStorageClient{ + storageConfig: storageConfig, + client: client, + bucket: bucket, + bucketURL: bucketURL, + }, nil } func (dsc DefaultStorageClient) Upload( @@ -86,6 +136,19 @@ func (dsc DefaultStorageClient) Download( 
return bucket.GetObjectToFile(sourceObject, destinationFilePath) } +func (dsc DefaultStorageClient) Copy( + srcObject string, + destObject string, +) error { + log.Printf("Copying object from %s to %s", srcObject, destObject) + + if _, err := dsc.bucket.CopyObject(srcObject, destObject); err != nil { + return fmt.Errorf("failed to copy object from %s to %s: %w", srcObject, destObject, err) + } + + return nil +} + func (dsc DefaultStorageClient) Delete( object string, ) error { @@ -104,6 +167,49 @@ func (dsc DefaultStorageClient) Delete( return bucket.DeleteObject(object) } +func (dsc DefaultStorageClient) DeleteRecursive( + prefix string, +) error { + if prefix != "" { + log.Printf("Deleting all objects in bucket %s with prefix '%s'\n", + dsc.storageConfig.BucketName, prefix) + } else { + log.Printf("Deleting all objects in bucket %s\n", + dsc.storageConfig.BucketName) + } + + marker := "" + + for { + var listOptions []oss.Option + if prefix != "" { + listOptions = append(listOptions, oss.Prefix(prefix)) + } + if marker != "" { + listOptions = append(listOptions, oss.Marker(marker)) + } + + resp, err := dsc.bucket.ListObjects(listOptions...) + if err != nil { + return fmt.Errorf("error listing objects: %w", err) + } + + for _, object := range resp.Objects { + if err := dsc.bucket.DeleteObject(object.Key); err != nil { + log.Printf("Failed to delete object %s: %v\n", object.Key, err) + } + } + + if !resp.IsTruncated { + break + } + + marker = resp.NextMarker + } + + return nil +} + func (dsc DefaultStorageClient) Exists(object string) (bool, error) { log.Printf("Checking if blob: %s/%s\n", dsc.storageConfig.BucketName, object) @@ -170,3 +276,127 @@ func (dsc DefaultStorageClient) SignedUrlGet( return bucket.SignURL(object, oss.HTTPGet, expiredInSec) } + +func (dsc DefaultStorageClient) List( + prefix string, +) ([]string, error) { + if prefix != "" { + log.Printf("Listing objects in bucket %s with prefix '%s'\n", + dsc.storageConfig.BucketName, prefix) + } else { + log.Printf("Listing objects in bucket %s\n", dsc.storageConfig.BucketName) + } + + var ( + objects []string + marker string + ) + + for { + var opts []oss.Option + if prefix != "" { + opts = append(opts, oss.Prefix(prefix)) + } + if marker != "" { + opts = append(opts, oss.Marker(marker)) + } + + resp, err := dsc.bucket.ListObjects(opts...) 
+ if err != nil { + return nil, fmt.Errorf("error retrieving page of objects: %w", err) + } + + for _, obj := range resp.Objects { + objects = append(objects, obj.Key) + } + + if !resp.IsTruncated { + break + } + marker = resp.NextMarker + } + + return objects, nil +} + +type BlobProperties struct { + ETag string `json:"etag,omitempty"` + LastModified time.Time `json:"last_modified,omitempty"` + ContentLength int64 `json:"content_length,omitempty"` +} + +func (dsc DefaultStorageClient) Properties( + dest string, +) error { + log.Printf("Getting properties for object %s/%s\n", + dsc.storageConfig.BucketName, dest) + + meta, err := dsc.bucket.GetObjectDetailedMeta(dest) + if err != nil { + var ossErr oss.ServiceError + if errors.As(err, &ossErr) && ossErr.StatusCode == 404 { + fmt.Println(`{}`) + return nil + } + + return fmt.Errorf("failed to get properties for object %s: %w", dest, err) + } + + eTag := meta.Get("ETag") + lastModifiedStr := meta.Get("Last-Modified") + contentLengthStr := meta.Get("Content-Length") + + var ( + lastModified time.Time + contentLength int64 + ) + + if lastModifiedStr != "" { + t, parseErr := time.Parse(time.RFC1123, lastModifiedStr) + if parseErr == nil { + lastModified = t + } + } + + if contentLengthStr != "" { + cl, convErr := strconv.ParseInt(contentLengthStr, 10, 64) + if convErr == nil { + contentLength = cl + } + } + + props := BlobProperties{ + ETag: strings.Trim(eTag, `"`), + LastModified: lastModified, + ContentLength: contentLength, + } + + output, err := json.MarshalIndent(props, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal object properties: %w", err) + } + + fmt.Println(string(output)) + return nil +} + +func (dsc DefaultStorageClient) EnsureBucketExists() error { + log.Printf("Ensuring bucket '%s' exists\n", dsc.storageConfig.BucketName) + + exists, err := dsc.client.IsBucketExist(dsc.storageConfig.BucketName) + if err != nil { + return fmt.Errorf("failed to check if bucket exists: %w", err) + } + + if exists { + log.Printf("Bucket '%s' already exists\n", dsc.storageConfig.BucketName) + return nil + } + + if err := dsc.client.CreateBucket(dsc.storageConfig.BucketName); err != nil { + return fmt.Errorf("failed to create bucket '%s': %w", dsc.storageConfig.BucketName, err) + } + + log.Printf("Bucket '%s' created successfully\n", dsc.storageConfig.BucketName) + return nil +} diff --git a/alioss/integration/general_ali_test.go b/alioss/integration/general_ali_test.go index 7a25640..76932af 100644 --- a/alioss/integration/general_ali_test.go +++ b/alioss/integration/general_ali_test.go @@ -150,6 +150,105 @@ var _ = Describe("General testing for all Ali regions", func() { }) }) + Describe("Invoking `delete-recursive`", func() { + It("deletes all objects with a given prefix", func() { + prefix := integration.GenerateRandomString() + blob1 := prefix + "/a" + blob2 := prefix + "/b" + otherBlob := integration.GenerateRandomString() + + // Create a temp file for uploads + contentFile1 := integration.MakeContentFile("content-1") + contentFile2 := integration.MakeContentFile("content-2") + contentFileOther := integration.MakeContentFile("other-content") + defer func() { + _ = os.Remove(contentFile1) //nolint:errcheck + _ = os.Remove(contentFile2) //nolint:errcheck + _ = os.Remove(contentFileOther) //nolint:errcheck + // make sure all are gone + for _, b := range []string{blob1, blob2, otherBlob} { + cliSession, err := integration.RunCli(cliPath, configPath, "delete", b) + if err == nil && (cliSession.ExitCode() == 0 || 
cliSession.ExitCode() == 3) { + continue + } + } + }() + + // Put three blobs + cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile1, blob1) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + cliSession, err = integration.RunCli(cliPath, configPath, "put", contentFile2, blob2) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + cliSession, err = integration.RunCli(cliPath, configPath, "put", contentFileOther, otherBlob) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + // Call delete-recursive + cliSession, err = integration.RunCli(cliPath, configPath, "delete-recursive", prefix) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + // Objects with prefix should be gone (exit code 3) + cliSession, err = integration.RunCli(cliPath, configPath, "exists", blob1) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(Equal(3)) + + cliSession, err = integration.RunCli(cliPath, configPath, "exists", blob2) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(Equal(3)) + + // Other blob should still exist (exit code 0) + cliSession, err = integration.RunCli(cliPath, configPath, "exists", otherBlob) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(Equal(0)) + }) + }) + + Describe("Invoking `copy`", func() { + It("copies the contents from one object to another", func() { + srcBlob := blobName + "-src" + destBlob := blobName + "-dest" + + // Clean up both at the end + defer func() { + cliSession, err := integration.RunCli(cliPath, configPath, "delete", srcBlob) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + cliSession, err = integration.RunCli(cliPath, configPath, "delete", destBlob) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + }() + + // Upload source + contentFile = integration.MakeContentFile("copied content") + cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, srcBlob) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + // Invoke copy + cliSession, err = integration.RunCli(cliPath, configPath, "copy", srcBlob, destBlob) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + // Download destination and verify content + tmpLocalFile, _ := os.CreateTemp("", "ali-storage-cli-copy") //nolint:errcheck + tmpLocalFile.Close() //nolint:errcheck + defer func() { _ = os.Remove(tmpLocalFile.Name()) }() //nolint:errcheck + + cliSession, err = integration.RunCli(cliPath, configPath, "get", destBlob, tmpLocalFile.Name()) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + gottenBytes, _ := os.ReadFile(tmpLocalFile.Name()) //nolint:errcheck + Expect(string(gottenBytes)).To(Equal("copied content")) + }) + }) + Describe("Invoking `exists`", func() { It("returns 0 for an existing blob", func() { defer func() { @@ -210,4 +309,68 @@ var _ = Describe("General testing for all Ali regions", func() { Expect(consoleOutput).To(ContainSubstring("version")) }) }) + + Describe("Invoking `list`", func() { + It("lists all blobs with a given prefix", func() { + prefix := integration.GenerateRandomString() + blob1 := prefix + "/a" + blob2 := prefix + "/b" + otherBlob := integration.GenerateRandomString() + + defer func() { + for _, b := range []string{blob1, blob2, otherBlob} { + _, err := integration.RunCli(cliPath, 
configPath, "delete", b) + Expect(err).ToNot(HaveOccurred()) + } + }() + + contentFile1 := integration.MakeContentFile("list-1") + contentFile2 := integration.MakeContentFile("list-2") + contentFileOther := integration.MakeContentFile("list-other") + defer func() { + _ = os.Remove(contentFile1) //nolint:errcheck + _ = os.Remove(contentFile2) //nolint:errcheck + _ = os.Remove(contentFileOther) //nolint:errcheck + }() + + // Put blobs + cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile1, blob1) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + cliSession, err = integration.RunCli(cliPath, configPath, "put", contentFile2, blob2) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + cliSession, err = integration.RunCli(cliPath, configPath, "put", contentFileOther, otherBlob) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + // List with prefix + cliSession, err = integration.RunCli(cliPath, configPath, "list", prefix) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + output := bytes.NewBuffer(cliSession.Out.Contents()).String() + + Expect(output).To(ContainSubstring(blob1)) + Expect(output).To(ContainSubstring(blob2)) + Expect(output).NotTo(ContainSubstring(otherBlob)) + }) + }) + + Describe("Invoking `ensure-bucket-exists`", func() { + It("is idempotent", func() { + // first run + s1, err := integration.RunCli(cliPath, configPath, "ensure-bucket-exists") + Expect(err).ToNot(HaveOccurred()) + Expect(s1.ExitCode()).To(BeZero()) + + // second run should also succeed (bucket already exists) + s2, err := integration.RunCli(cliPath, configPath, "ensure-bucket-exists") + Expect(err).ToNot(HaveOccurred()) + Expect(s2.ExitCode()).To(BeZero()) + }) + }) + }) diff --git a/alioss/main.go b/alioss/main.go index 348de6d..92dfa06 100644 --- a/alioss/main.go +++ b/alioss/main.go @@ -45,10 +45,9 @@ func main() { } nonFlagArgs := flag.Args() - if len(nonFlagArgs) < 2 { - log.Fatalf("Expected at least two arguments got %d\n", len(nonFlagArgs)) + if len(nonFlagArgs) == 0 { + log.Fatalf("Expected at least one argument (command), got %d\n", len(nonFlagArgs)) } - cmd := nonFlagArgs[0] switch cmd { @@ -75,6 +74,16 @@ func main() { err = blobstoreClient.Get(source, destinationFilePath) fatalLog(cmd, err) + case "copy": + if len(nonFlagArgs) != 3 { + log.Fatalf("Get method expected 3 arguments got %d\n", len(nonFlagArgs)) + } + + srcBlob, dstBlob := nonFlagArgs[1], nonFlagArgs[2] + + err = blobstoreClient.Copy(srcBlob, dstBlob) + fatalLog(cmd, err) + case "delete": if len(nonFlagArgs) != 2 { log.Fatalf("Delete method expected 2 arguments got %d\n", len(nonFlagArgs)) @@ -83,6 +92,18 @@ func main() { err = blobstoreClient.Delete(nonFlagArgs[1]) fatalLog(cmd, err) + case "delete-recursive": + var prefix string + if len(nonFlagArgs) > 2 { + log.Fatalf("delete-recursive takes at most one argument (prefix) got %d\n", len(nonFlagArgs)-1) + } else if len(nonFlagArgs) == 2 { + prefix = nonFlagArgs[1] + } else { + prefix = "" + } + err = blobstoreClient.DeleteRecursive(prefix) + fatalLog("delete-recursive", err) + case "exists": if len(nonFlagArgs) != 2 { log.Fatalf("Exists method expected 2 arguments got %d\n", len(nonFlagArgs)) @@ -123,6 +144,43 @@ func main() { fmt.Println(signedURL) os.Exit(0) + case "list": + var prefix string + + if len(nonFlagArgs) == 1 { + prefix = "" + } else if len(nonFlagArgs) == 2 { + prefix = nonFlagArgs[1] + } else { + log.Fatalf("List 
method expected 1 or 2 arguments, got %d\n", len(nonFlagArgs)-1) + } + + var objects []string + objects, err = blobstoreClient.List(prefix) + if err != nil { + log.Fatalf("Failed to list objects: %s", err) + } + + for _, object := range objects { + fmt.Println(object) + } + + case "properties": + if len(nonFlagArgs) != 2 { + log.Fatalf("Properties method expected 2 arguments got %d\n", len(nonFlagArgs)) + } + + err = blobstoreClient.Properties(nonFlagArgs[1]) + fatalLog("properties", err) + + case "ensure-bucket-exists": + if len(nonFlagArgs) != 1 { + log.Fatalf("EnsureBucketExists method expected 1 arguments got %d\n", len(nonFlagArgs)) + } + + err = blobstoreClient.EnsureBucketExists() + fatalLog("ensure-bucket-exists", err) + default: log.Fatalf("unknown command: '%s'\n", cmd) } From 09538fef53636a1dadbac6d7339f4ba3b9b12563 Mon Sep 17 00:00:00 2001 From: Katharina Przybill <30441792+kathap@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:25:05 +0100 Subject: [PATCH 2/4] add fake storage client --- .../client/clientfakes/fake_storage_client.go | 370 +++++++++++++++++- 1 file changed, 358 insertions(+), 12 deletions(-) diff --git a/alioss/client/clientfakes/fake_storage_client.go b/alioss/client/clientfakes/fake_storage_client.go index b963907..2630565 100644 --- a/alioss/client/clientfakes/fake_storage_client.go +++ b/alioss/client/clientfakes/fake_storage_client.go @@ -8,6 +8,18 @@ import ( ) type FakeStorageClient struct { + CopyStub func(string, string) error + copyMutex sync.RWMutex + copyArgsForCall []struct { + arg1 string + arg2 string + } + copyReturns struct { + result1 error + } + copyReturnsOnCall map[int]struct { + result1 error + } DeleteStub func(string) error deleteMutex sync.RWMutex deleteArgsForCall []struct { @@ -19,6 +31,17 @@ type FakeStorageClient struct { deleteReturnsOnCall map[int]struct { result1 error } + DeleteRecursiveStub func(string) error + deleteRecursiveMutex sync.RWMutex + deleteRecursiveArgsForCall []struct { + arg1 string + } + deleteRecursiveReturns struct { + result1 error + } + deleteRecursiveReturnsOnCall map[int]struct { + result1 error + } DownloadStub func(string, string) error downloadMutex sync.RWMutex downloadArgsForCall []struct { @@ -31,6 +54,16 @@ type FakeStorageClient struct { downloadReturnsOnCall map[int]struct { result1 error } + EnsureBucketExistsStub func() error + ensureBucketExistsMutex sync.RWMutex + ensureBucketExistsArgsForCall []struct { + } + ensureBucketExistsReturns struct { + result1 error + } + ensureBucketExistsReturnsOnCall map[int]struct { + result1 error + } ExistsStub func(string) (bool, error) existsMutex sync.RWMutex existsArgsForCall []struct { @@ -44,6 +77,30 @@ type FakeStorageClient struct { result1 bool result2 error } + ListStub func(string) ([]string, error) + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 string + } + listReturns struct { + result1 []string + result2 error + } + listReturnsOnCall map[int]struct { + result1 []string + result2 error + } + PropertiesStub func(string) error + propertiesMutex sync.RWMutex + propertiesArgsForCall []struct { + arg1 string + } + propertiesReturns struct { + result1 error + } + propertiesReturnsOnCall map[int]struct { + result1 error + } SignedUrlGetStub func(string, int64) (string, error) signedUrlGetMutex sync.RWMutex signedUrlGetArgsForCall []struct { @@ -89,6 +146,68 @@ type FakeStorageClient struct { invocationsMutex sync.RWMutex } +func (fake *FakeStorageClient) Copy(arg1 string, arg2 string) error { + fake.copyMutex.Lock() + ret, specificReturn 
:= fake.copyReturnsOnCall[len(fake.copyArgsForCall)] + fake.copyArgsForCall = append(fake.copyArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.CopyStub + fakeReturns := fake.copyReturns + fake.recordInvocation("Copy", []interface{}{arg1, arg2}) + fake.copyMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorageClient) CopyCallCount() int { + fake.copyMutex.RLock() + defer fake.copyMutex.RUnlock() + return len(fake.copyArgsForCall) +} + +func (fake *FakeStorageClient) CopyCalls(stub func(string, string) error) { + fake.copyMutex.Lock() + defer fake.copyMutex.Unlock() + fake.CopyStub = stub +} + +func (fake *FakeStorageClient) CopyArgsForCall(i int) (string, string) { + fake.copyMutex.RLock() + defer fake.copyMutex.RUnlock() + argsForCall := fake.copyArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeStorageClient) CopyReturns(result1 error) { + fake.copyMutex.Lock() + defer fake.copyMutex.Unlock() + fake.CopyStub = nil + fake.copyReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorageClient) CopyReturnsOnCall(i int, result1 error) { + fake.copyMutex.Lock() + defer fake.copyMutex.Unlock() + fake.CopyStub = nil + if fake.copyReturnsOnCall == nil { + fake.copyReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.copyReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeStorageClient) Delete(arg1 string) error { fake.deleteMutex.Lock() ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] @@ -150,6 +269,67 @@ func (fake *FakeStorageClient) DeleteReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeStorageClient) DeleteRecursive(arg1 string) error { + fake.deleteRecursiveMutex.Lock() + ret, specificReturn := fake.deleteRecursiveReturnsOnCall[len(fake.deleteRecursiveArgsForCall)] + fake.deleteRecursiveArgsForCall = append(fake.deleteRecursiveArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.DeleteRecursiveStub + fakeReturns := fake.deleteRecursiveReturns + fake.recordInvocation("DeleteRecursive", []interface{}{arg1}) + fake.deleteRecursiveMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorageClient) DeleteRecursiveCallCount() int { + fake.deleteRecursiveMutex.RLock() + defer fake.deleteRecursiveMutex.RUnlock() + return len(fake.deleteRecursiveArgsForCall) +} + +func (fake *FakeStorageClient) DeleteRecursiveCalls(stub func(string) error) { + fake.deleteRecursiveMutex.Lock() + defer fake.deleteRecursiveMutex.Unlock() + fake.DeleteRecursiveStub = stub +} + +func (fake *FakeStorageClient) DeleteRecursiveArgsForCall(i int) string { + fake.deleteRecursiveMutex.RLock() + defer fake.deleteRecursiveMutex.RUnlock() + argsForCall := fake.deleteRecursiveArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeStorageClient) DeleteRecursiveReturns(result1 error) { + fake.deleteRecursiveMutex.Lock() + defer fake.deleteRecursiveMutex.Unlock() + fake.DeleteRecursiveStub = nil + fake.deleteRecursiveReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorageClient) DeleteRecursiveReturnsOnCall(i int, result1 error) { + fake.deleteRecursiveMutex.Lock() + defer fake.deleteRecursiveMutex.Unlock() + fake.DeleteRecursiveStub = nil + if fake.deleteRecursiveReturnsOnCall == nil { + 
fake.deleteRecursiveReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteRecursiveReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeStorageClient) Download(arg1 string, arg2 string) error { fake.downloadMutex.Lock() ret, specificReturn := fake.downloadReturnsOnCall[len(fake.downloadArgsForCall)] @@ -212,6 +392,59 @@ func (fake *FakeStorageClient) DownloadReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeStorageClient) EnsureBucketExists() error { + fake.ensureBucketExistsMutex.Lock() + ret, specificReturn := fake.ensureBucketExistsReturnsOnCall[len(fake.ensureBucketExistsArgsForCall)] + fake.ensureBucketExistsArgsForCall = append(fake.ensureBucketExistsArgsForCall, struct { + }{}) + stub := fake.EnsureBucketExistsStub + fakeReturns := fake.ensureBucketExistsReturns + fake.recordInvocation("EnsureBucketExists", []interface{}{}) + fake.ensureBucketExistsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorageClient) EnsureBucketExistsCallCount() int { + fake.ensureBucketExistsMutex.RLock() + defer fake.ensureBucketExistsMutex.RUnlock() + return len(fake.ensureBucketExistsArgsForCall) +} + +func (fake *FakeStorageClient) EnsureBucketExistsCalls(stub func() error) { + fake.ensureBucketExistsMutex.Lock() + defer fake.ensureBucketExistsMutex.Unlock() + fake.EnsureBucketExistsStub = stub +} + +func (fake *FakeStorageClient) EnsureBucketExistsReturns(result1 error) { + fake.ensureBucketExistsMutex.Lock() + defer fake.ensureBucketExistsMutex.Unlock() + fake.EnsureBucketExistsStub = nil + fake.ensureBucketExistsReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorageClient) EnsureBucketExistsReturnsOnCall(i int, result1 error) { + fake.ensureBucketExistsMutex.Lock() + defer fake.ensureBucketExistsMutex.Unlock() + fake.EnsureBucketExistsStub = nil + if fake.ensureBucketExistsReturnsOnCall == nil { + fake.ensureBucketExistsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.ensureBucketExistsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeStorageClient) Exists(arg1 string) (bool, error) { fake.existsMutex.Lock() ret, specificReturn := fake.existsReturnsOnCall[len(fake.existsArgsForCall)] @@ -276,6 +509,131 @@ func (fake *FakeStorageClient) ExistsReturnsOnCall(i int, result1 bool, result2 }{result1, result2} } +func (fake *FakeStorageClient) List(arg1 string) ([]string, error) { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeStorageClient) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *FakeStorageClient) ListCalls(stub func(string) ([]string, error)) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *FakeStorageClient) ListArgsForCall(i int) string { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1 +} + +func (fake 
*FakeStorageClient) ListReturns(result1 []string, result2 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 []string + result2 error + }{result1, result2} +} + +func (fake *FakeStorageClient) ListReturnsOnCall(i int, result1 []string, result2 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 []string + result2 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 []string + result2 error + }{result1, result2} +} + +func (fake *FakeStorageClient) Properties(arg1 string) error { + fake.propertiesMutex.Lock() + ret, specificReturn := fake.propertiesReturnsOnCall[len(fake.propertiesArgsForCall)] + fake.propertiesArgsForCall = append(fake.propertiesArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.PropertiesStub + fakeReturns := fake.propertiesReturns + fake.recordInvocation("Properties", []interface{}{arg1}) + fake.propertiesMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorageClient) PropertiesCallCount() int { + fake.propertiesMutex.RLock() + defer fake.propertiesMutex.RUnlock() + return len(fake.propertiesArgsForCall) +} + +func (fake *FakeStorageClient) PropertiesCalls(stub func(string) error) { + fake.propertiesMutex.Lock() + defer fake.propertiesMutex.Unlock() + fake.PropertiesStub = stub +} + +func (fake *FakeStorageClient) PropertiesArgsForCall(i int) string { + fake.propertiesMutex.RLock() + defer fake.propertiesMutex.RUnlock() + argsForCall := fake.propertiesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeStorageClient) PropertiesReturns(result1 error) { + fake.propertiesMutex.Lock() + defer fake.propertiesMutex.Unlock() + fake.PropertiesStub = nil + fake.propertiesReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorageClient) PropertiesReturnsOnCall(i int, result1 error) { + fake.propertiesMutex.Lock() + defer fake.propertiesMutex.Unlock() + fake.PropertiesStub = nil + if fake.propertiesReturnsOnCall == nil { + fake.propertiesReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.propertiesReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeStorageClient) SignedUrlGet(arg1 string, arg2 int64) (string, error) { fake.signedUrlGetMutex.Lock() ret, specificReturn := fake.signedUrlGetReturnsOnCall[len(fake.signedUrlGetArgsForCall)] @@ -472,18 +830,6 @@ func (fake *FakeStorageClient) UploadReturnsOnCall(i int, result1 error) { func (fake *FakeStorageClient) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.deleteMutex.RLock() - defer fake.deleteMutex.RUnlock() - fake.downloadMutex.RLock() - defer fake.downloadMutex.RUnlock() - fake.existsMutex.RLock() - defer fake.existsMutex.RUnlock() - fake.signedUrlGetMutex.RLock() - defer fake.signedUrlGetMutex.RUnlock() - fake.signedUrlPutMutex.RLock() - defer fake.signedUrlPutMutex.RUnlock() - fake.uploadMutex.RLock() - defer fake.uploadMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value From 2bfbdec1644ce3eff5d3a5353d917bbffe1fe1ac Mon Sep 17 00:00:00 2001 From: Katharina Przybill <30441792+kathap@users.noreply.github.com> Date: Tue, 25 Nov 2025 18:12:13 +0100 Subject: [PATCH 3/4] 
clean up comments --- alioss/integration/general_ali_test.go | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/alioss/integration/general_ali_test.go b/alioss/integration/general_ali_test.go index 76932af..39bdb89 100644 --- a/alioss/integration/general_ali_test.go +++ b/alioss/integration/general_ali_test.go @@ -157,7 +157,6 @@ var _ = Describe("General testing for all Ali regions", func() { blob2 := prefix + "/b" otherBlob := integration.GenerateRandomString() - // Create a temp file for uploads contentFile1 := integration.MakeContentFile("content-1") contentFile2 := integration.MakeContentFile("content-2") contentFileOther := integration.MakeContentFile("other-content") @@ -165,7 +164,7 @@ var _ = Describe("General testing for all Ali regions", func() { _ = os.Remove(contentFile1) //nolint:errcheck _ = os.Remove(contentFile2) //nolint:errcheck _ = os.Remove(contentFileOther) //nolint:errcheck - // make sure all are gone + for _, b := range []string{blob1, blob2, otherBlob} { cliSession, err := integration.RunCli(cliPath, configPath, "delete", b) if err == nil && (cliSession.ExitCode() == 0 || cliSession.ExitCode() == 3) { @@ -174,7 +173,6 @@ var _ = Describe("General testing for all Ali regions", func() { } }() - // Put three blobs cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile1, blob1) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -187,12 +185,10 @@ var _ = Describe("General testing for all Ali regions", func() { Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - // Call delete-recursive cliSession, err = integration.RunCli(cliPath, configPath, "delete-recursive", prefix) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - // Objects with prefix should be gone (exit code 3) cliSession, err = integration.RunCli(cliPath, configPath, "exists", blob1) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(3)) @@ -201,7 +197,6 @@ var _ = Describe("General testing for all Ali regions", func() { Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(3)) - // Other blob should still exist (exit code 0) cliSession, err = integration.RunCli(cliPath, configPath, "exists", otherBlob) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(0)) @@ -213,7 +208,6 @@ var _ = Describe("General testing for all Ali regions", func() { srcBlob := blobName + "-src" destBlob := blobName + "-dest" - // Clean up both at the end defer func() { cliSession, err := integration.RunCli(cliPath, configPath, "delete", srcBlob) Expect(err).ToNot(HaveOccurred()) @@ -224,18 +218,15 @@ var _ = Describe("General testing for all Ali regions", func() { Expect(cliSession.ExitCode()).To(BeZero()) }() - // Upload source contentFile = integration.MakeContentFile("copied content") cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, srcBlob) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - // Invoke copy cliSession, err = integration.RunCli(cliPath, configPath, "copy", srcBlob, destBlob) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - // Download destination and verify content tmpLocalFile, _ := os.CreateTemp("", "ali-storage-cli-copy") //nolint:errcheck tmpLocalFile.Close() //nolint:errcheck defer func() { _ = os.Remove(tmpLocalFile.Name()) }() //nolint:errcheck @@ -333,7 +324,6 @@ var _ = Describe("General testing for all Ali regions", func() 
{ _ = os.Remove(contentFileOther) //nolint:errcheck }() - // Put blobs cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile1, blob1) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -346,7 +336,6 @@ var _ = Describe("General testing for all Ali regions", func() { Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - // List with prefix cliSession, err = integration.RunCli(cliPath, configPath, "list", prefix) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -361,12 +350,10 @@ var _ = Describe("General testing for all Ali regions", func() { Describe("Invoking `ensure-bucket-exists`", func() { It("is idempotent", func() { - // first run s1, err := integration.RunCli(cliPath, configPath, "ensure-bucket-exists") Expect(err).ToNot(HaveOccurred()) Expect(s1.ExitCode()).To(BeZero()) - // second run should also succeed (bucket already exists) s2, err := integration.RunCli(cliPath, configPath, "ensure-bucket-exists") Expect(err).ToNot(HaveOccurred()) Expect(s2.ExitCode()).To(BeZero()) From 9de39f70b541630a59ba98bedb4390c24ef0aa1a Mon Sep 17 00:00:00 2001 From: Katharina Przybill <30441792+kathap@users.noreply.github.com> Date: Thu, 4 Dec 2025 15:51:05 +0100 Subject: [PATCH 4/4] enhancement as proposed from review --- alioss/client/client.go | 4 -- alioss/client/storage_client.go | 34 +++++----- alioss/integration/general_ali_test.go | 91 ++++++++++++++++++++++++-- 3 files changed, 101 insertions(+), 28 deletions(-) diff --git a/alioss/client/client.go b/alioss/client/client.go index 7a78fab..083e0de 100644 --- a/alioss/client/client.go +++ b/alioss/client/client.go @@ -42,7 +42,6 @@ func (client *AliBlobstore) Delete(object string) error { } func (client *AliBlobstore) DeleteRecursive(prefix string) error { - return client.storageClient.DeleteRecursive(prefix) } @@ -86,16 +85,13 @@ func (client *AliBlobstore) List(prefix string) ([]string, error) { } func (client *AliBlobstore) Copy(srcBlob string, dstBlob string) error { - return client.storageClient.Copy(srcBlob, dstBlob) } func (client *AliBlobstore) Properties(dest string) error { - return client.storageClient.Properties(dest) } func (client *AliBlobstore) EnsureBucketExists() error { - return client.storageClient.EnsureBucketExists() } diff --git a/alioss/client/storage_client.go b/alioss/client/storage_client.go index 8e447c4..4442a85 100644 --- a/alioss/client/storage_client.go +++ b/alioss/client/storage_client.go @@ -36,7 +36,7 @@ type StorageClient interface { ) error DeleteRecursive( - dest string, + objects string, ) error Exists( @@ -54,11 +54,11 @@ type StorageClient interface { ) (string, error) List( - prefix string, + bucketPrefix string, ) ([]string, error) Properties( - dest string, + object string, ) error EnsureBucketExists() error @@ -137,13 +137,13 @@ func (dsc DefaultStorageClient) Download( } func (dsc DefaultStorageClient) Copy( - srcObject string, - destObject string, + sourceObject string, + destinationObject string, ) error { - log.Printf("Copying object from %s to %s", srcObject, destObject) + log.Printf("Copying object from %s to %s", sourceObject, destinationObject) - if _, err := dsc.bucket.CopyObject(srcObject, destObject); err != nil { - return fmt.Errorf("failed to copy object from %s to %s: %w", srcObject, destObject, err) + if _, err := dsc.bucket.CopyObject(sourceObject, destinationObject); err != nil { + return fmt.Errorf("failed to copy object from %s to %s: %w", sourceObject, destinationObject, 
err) } return nil @@ -168,11 +168,11 @@ func (dsc DefaultStorageClient) Delete( } func (dsc DefaultStorageClient) DeleteRecursive( - prefix string, + objectPrefix string, ) error { - if prefix != "" { + if objectPrefix != "" { log.Printf("Deleting all objects in bucket %s with prefix '%s'\n", - dsc.storageConfig.BucketName, prefix) + dsc.storageConfig.BucketName, objectPrefix) } else { log.Printf("Deleting all objects in bucket %s\n", dsc.storageConfig.BucketName) @@ -182,8 +182,8 @@ func (dsc DefaultStorageClient) DeleteRecursive( for { var listOptions []oss.Option - if prefix != "" { - listOptions = append(listOptions, oss.Prefix(prefix)) + if objectPrefix != "" { + listOptions = append(listOptions, oss.Prefix(objectPrefix)) } if marker != "" { listOptions = append(listOptions, oss.Marker(marker)) @@ -326,12 +326,12 @@ type BlobProperties struct { } func (dsc DefaultStorageClient) Properties( - dest string, + bucketObject string, ) error { log.Printf("Getting properties for object %s/%s\n", - dsc.storageConfig.BucketName, dest) + dsc.storageConfig.BucketName, bucketObject) - meta, err := dsc.bucket.GetObjectDetailedMeta(dest) + meta, err := dsc.bucket.GetObjectDetailedMeta(bucketObject) if err != nil { var ossErr oss.ServiceError if errors.As(err, &ossErr) && ossErr.StatusCode == 404 { @@ -339,7 +339,7 @@ func (dsc DefaultStorageClient) Properties( return nil } - return fmt.Errorf("failed to get properties for object %s: %w", dest, err) + return fmt.Errorf("failed to get properties for object %s: %w", bucketObject, err) } eTag := meta.Get("ETag") diff --git a/alioss/integration/general_ali_test.go b/alioss/integration/general_ali_test.go index 39bdb89..46d64f5 100644 --- a/alioss/integration/general_ali_test.go +++ b/alioss/integration/general_ali_test.go @@ -2,9 +2,11 @@ package integration_test import ( "bytes" + "fmt" "os" + "github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/cloudfoundry/storage-cli/alioss/config" "github.com/cloudfoundry/storage-cli/alioss/integration" @@ -209,13 +211,16 @@ var _ = Describe("General testing for all Ali regions", func() { destBlob := blobName + "-dest" defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", srcBlob) - Expect(err).ToNot(HaveOccurred()) - Expect(cliSession.ExitCode()).To(BeZero()) - - cliSession, err = integration.RunCli(cliPath, configPath, "delete", destBlob) - Expect(err).ToNot(HaveOccurred()) - Expect(cliSession.ExitCode()).To(BeZero()) + for _, b := range []string{srcBlob, destBlob} { + cliSession, err := integration.RunCli(cliPath, configPath, "delete", b) + if err != nil { + GinkgoWriter.Printf("cleanup: error deleting %s: %v\n", b, err) + continue + } + if cliSession.ExitCode() != 0 && cliSession.ExitCode() != 3 { + GinkgoWriter.Printf("cleanup: delete %s exited with code %d\n", b, cliSession.ExitCode()) + } + } }() contentFile = integration.MakeContentFile("copied content") @@ -346,9 +351,81 @@ var _ = Describe("General testing for all Ali regions", func() { Expect(output).To(ContainSubstring(blob2)) Expect(output).NotTo(ContainSubstring(otherBlob)) }) + + It("lists all blobs across multiple pages", func() { + prefix := integration.GenerateRandomString() + const totalObjects = 120 + + var blobNames []string + var contentFiles []string + + for i := 0; i < totalObjects; i++ { + blobName := fmt.Sprintf("%s/%03d", prefix, i) + blobNames = append(blobNames, blobName) + + contentFile := integration.MakeContentFile(fmt.Sprintf("content-%d", i)) + contentFiles = append(contentFiles, contentFile) + + 
cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + } + + defer func() { + for _, f := range contentFiles { + _ = os.Remove(f) //nolint:errcheck + } + + for _, b := range blobNames { + cliSession, err := integration.RunCli(cliPath, configPath, "delete", b) + if err == nil && (cliSession.ExitCode() == 0 || cliSession.ExitCode() == 3) { + continue + } + } + }() + + cliSession, err := integration.RunCli(cliPath, configPath, "list", prefix) + Expect(err).ToNot(HaveOccurred()) + Expect(cliSession.ExitCode()).To(BeZero()) + + output := bytes.NewBuffer(cliSession.Out.Contents()).String() + + for _, b := range blobNames { + Expect(output).To(ContainSubstring(b)) + } + }) }) Describe("Invoking `ensure-bucket-exists`", func() { + It("creates a bucket that can be observed via the OSS API", func() { + newBucketName := bucketName + "-bommel" + + cfg := defaultConfig + cfg.BucketName = newBucketName + + configPath = integration.MakeConfigFile(&cfg) + defer func() { _ = os.Remove(configPath) }() //nolint:errcheck + + ossClient, err := oss.New(cfg.Endpoint, cfg.AccessKeyID, cfg.AccessKeySecret) + Expect(err).ToNot(HaveOccurred()) + + defer func() { + if err := ossClient.DeleteBucket(newBucketName); err != nil { + if _, ferr := fmt.Fprintf(GinkgoWriter, "cleanup: failed to delete bucket %s: %v\n", newBucketName, err); ferr != nil { + GinkgoWriter.Printf("cleanup: failed to write cleanup message: %v\n", ferr) + } + } + }() + + s1, err := integration.RunCli(cliPath, configPath, "ensure-bucket-exists") + Expect(err).ToNot(HaveOccurred()) + Expect(s1.ExitCode()).To(BeZero()) + + exists, err := ossClient.IsBucketExist(newBucketName) + Expect(err).ToNot(HaveOccurred()) + Expect(exists).To(BeTrue()) + }) + It("is idempotent", func() { s1, err := integration.RunCli(cliPath, configPath, "ensure-bucket-exists") Expect(err).ToNot(HaveOccurred())