Skip to content

Commit 3ced487

Browse files
authored
feat(storage): support custom file options for upload (#1903)
1 parent a71ded2 commit 3ced487

File tree

3 files changed

+44
-20
lines changed

3 files changed

+44
-20
lines changed

cmd/storage.go

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ import (
44
"github.com/spf13/afero"
55
"github.com/spf13/cobra"
66
"github.com/supabase/cli/internal/storage"
7+
"github.com/supabase/cli/internal/storage/client"
78
"github.com/supabase/cli/internal/storage/cp"
89
"github.com/supabase/cli/internal/storage/ls"
910
"github.com/supabase/cli/internal/storage/mv"
@@ -33,6 +34,7 @@ var (
3334
},
3435
}
3536

37+
options client.FileOptions
3638
maxJobs uint
3739

3840
cpCmd = &cobra.Command{
@@ -44,7 +46,11 @@ cp -r ss:///bucket/docs .
4446
Short: "Copy objects from src to dst path",
4547
Args: cobra.ExactArgs(2),
4648
RunE: func(cmd *cobra.Command, args []string) error {
47-
return cp.Run(cmd.Context(), args[0], args[1], recursive, maxJobs, afero.NewOsFs())
49+
opts := func(fo *client.FileOptions) {
50+
fo.CacheControl = options.CacheControl
51+
fo.ContentType = options.ContentType
52+
}
53+
return cp.Run(cmd.Context(), args[0], args[1], recursive, maxJobs, afero.NewOsFs(), opts)
4854
},
4955
}
5056

@@ -76,6 +82,9 @@ func init() {
7682
storageCmd.AddCommand(lsCmd)
7783
cpFlags := cpCmd.Flags()
7884
cpFlags.BoolVarP(&recursive, "recursive", "r", false, "Recursively copy a directory.")
85+
cpFlags.StringVar(&options.CacheControl, "cache-control", "max-age=3600", "Custom Cache-Control header for HTTP upload.")
86+
cpFlags.StringVar(&options.ContentType, "content-type", "", "Custom Content-Type header for HTTP upload.")
87+
cpFlags.Lookup("content-type").DefValue = "auto-detect"
7988
cpFlags.UintVarP(&maxJobs, "jobs", "j", 1, "Maximum number of parallel jobs.")
8089
storageCmd.AddCommand(cpCmd)
8190
rmCmd.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively remove a directory.")

internal/storage/client/objects.go

Lines changed: 28 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -63,22 +63,38 @@ func ListStorageObjects(ctx context.Context, projectRef, bucket, prefix string,
6363
return *data, nil
6464
}
6565

66-
func UploadStorageObject(ctx context.Context, projectRef, remotePath, localPath string, fsys afero.Fs) error {
66+
type FileOptions struct {
67+
CacheControl string
68+
ContentType string
69+
}
70+
71+
func UploadStorageObject(ctx context.Context, projectRef, remotePath, localPath string, fsys afero.Fs, opts ...func(*FileOptions)) error {
6772
f, err := fsys.Open(localPath)
6873
if err != nil {
6974
return errors.Errorf("failed to open file: %w", err)
7075
}
7176
defer f.Close()
72-
// Decode mimetype
73-
header := io.LimitReader(f, 512)
74-
buf, err := io.ReadAll(header)
75-
if err != nil {
76-
return errors.Errorf("failed to read file: %w", err)
77+
// Customise file options
78+
fo := &FileOptions{}
79+
for _, apply := range opts {
80+
apply(fo)
7781
}
78-
mimetype := http.DetectContentType(buf)
79-
_, err = f.Seek(0, io.SeekStart)
80-
if err != nil {
81-
return errors.Errorf("failed to seek file: %w", err)
82+
// Use default value of storage-js: https://github.com/supabase/storage-js/blob/main/src/packages/StorageFileApi.ts#L22
83+
if len(fo.CacheControl) == 0 {
84+
fo.CacheControl = "max-age=3600"
85+
}
86+
// Decode mimetype
87+
if len(fo.ContentType) == 0 {
88+
header := io.LimitReader(f, 512)
89+
buf, err := io.ReadAll(header)
90+
if err != nil {
91+
return errors.Errorf("failed to read file: %w", err)
92+
}
93+
fo.ContentType = http.DetectContentType(buf)
94+
_, err = f.Seek(0, io.SeekStart)
95+
if err != nil {
96+
return errors.Errorf("failed to seek file: %w", err)
97+
}
8298
}
8399
// Prepare request
84100
apiKey, err := tenant.GetApiKeys(ctx, projectRef)
@@ -92,9 +108,8 @@ func UploadStorageObject(ctx context.Context, projectRef, remotePath, localPath
92108
return errors.Errorf("failed to initialise http request: %w", err)
93109
}
94110
req.Header.Add("Authorization", "Bearer "+apiKey.ServiceRole)
95-
req.Header.Add("Content-Type", mimetype)
96-
// Use default value of storage-js: https://github.com/supabase/storage-js/blob/main/src/packages/StorageFileApi.ts#L22
97-
req.Header.Add("Cache-Control", "max-age=3600")
111+
req.Header.Add("Content-Type", fo.ContentType)
112+
req.Header.Add("Cache-Control", fo.CacheControl)
98113
// Sends request
99114
resp, err := http.DefaultClient.Do(req)
100115
if err != nil {

internal/storage/cp/cp.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ import (
2121

2222
var errUnsupportedOperation = errors.New("Unsupported operation")
2323

24-
func Run(ctx context.Context, src, dst string, recursive bool, maxJobs uint, fsys afero.Fs) error {
24+
func Run(ctx context.Context, src, dst string, recursive bool, maxJobs uint, fsys afero.Fs, opts ...func(*client.FileOptions)) error {
2525
srcParsed, err := url.Parse(src)
2626
if err != nil {
2727
return errors.Errorf("failed to parse src url: %w", err)
@@ -41,9 +41,9 @@ func Run(ctx context.Context, src, dst string, recursive bool, maxJobs uint, fsy
4141
return client.DownloadStorageObject(ctx, projectRef, srcParsed.Path, dst, fsys)
4242
} else if srcParsed.Scheme == "" && strings.ToLower(dstParsed.Scheme) == storage.STORAGE_SCHEME {
4343
if recursive {
44-
return UploadStorageObjectAll(ctx, projectRef, dstParsed.Path, src, maxJobs, fsys)
44+
return UploadStorageObjectAll(ctx, projectRef, dstParsed.Path, src, maxJobs, fsys, opts...)
4545
}
46-
return client.UploadStorageObject(ctx, projectRef, dstParsed.Path, src, fsys)
46+
return client.UploadStorageObject(ctx, projectRef, dstParsed.Path, src, fsys, opts...)
4747
} else if strings.ToLower(srcParsed.Scheme) == storage.STORAGE_SCHEME && strings.ToLower(dstParsed.Scheme) == storage.STORAGE_SCHEME {
4848
return errors.New("Copying between buckets is not supported")
4949
}
@@ -81,7 +81,7 @@ func DownloadStorageObjectAll(ctx context.Context, projectRef, remotePath, local
8181
return errors.Join(err, jq.Collect())
8282
}
8383

84-
func UploadStorageObjectAll(ctx context.Context, projectRef, remotePath, localPath string, maxJobs uint, fsys afero.Fs) error {
84+
func UploadStorageObjectAll(ctx context.Context, projectRef, remotePath, localPath string, maxJobs uint, fsys afero.Fs, opts ...func(*client.FileOptions)) error {
8585
noSlash := strings.TrimSuffix(remotePath, "/")
8686
// Check if directory exists on remote
8787
dirExists := false
@@ -125,14 +125,14 @@ func UploadStorageObjectAll(ctx context.Context, projectRef, remotePath, localPa
125125
}
126126
fmt.Fprintln(os.Stderr, "Uploading:", filePath, "=>", dstPath)
127127
job := func() error {
128-
err := client.UploadStorageObject(ctx, projectRef, dstPath, filePath, fsys)
128+
err := client.UploadStorageObject(ctx, projectRef, dstPath, filePath, fsys, opts...)
129129
if err != nil && strings.Contains(err.Error(), `"error":"Bucket not found"`) {
130130
// Retry after creating bucket
131131
if bucket, prefix := storage.SplitBucketPrefix(dstPath); len(prefix) > 0 {
132132
if _, err := client.CreateStorageBucket(ctx, projectRef, bucket); err != nil {
133133
return err
134134
}
135-
err = client.UploadStorageObject(ctx, projectRef, dstPath, filePath, fsys)
135+
err = client.UploadStorageObject(ctx, projectRef, dstPath, filePath, fsys, opts...)
136136
}
137137
}
138138
return err

0 commit comments

Comments (0)