diff --git a/internal/seed/buckets/buckets.go b/internal/seed/buckets/buckets.go
index 365c3a395..b922e63d8 100644
--- a/internal/seed/buckets/buckets.go
+++ b/internal/seed/buckets/buckets.go
@@ -3,6 +3,7 @@ package buckets
 import (
 	"context"
 	"fmt"
+	"os"
 
 	"github.com/spf13/afero"
 	"github.com/supabase/cli/internal/storage/client"
@@ -29,5 +30,25 @@ func Run(ctx context.Context, projectRef string, interactive bool, fsys afero.Fs
 	if err := api.UpsertBuckets(ctx, utils.Config.Storage.Buckets, filter); err != nil {
 		return err
 	}
+	prune := func(name string) bool {
+		label := fmt.Sprintf("Bucket %s not found in %s. Do you want to prune it?", utils.Bold(name), utils.Bold(utils.ConfigPath))
+		shouldPrune, err := console.PromptYesNo(ctx, label, false)
+		if err != nil {
+			fmt.Fprintln(utils.GetDebugLogger(), err)
+		}
+		return shouldPrune
+	}
+	if utils.Config.Storage.AnalyticsBuckets.Enabled && len(projectRef) > 0 {
+		fmt.Fprintln(os.Stderr, "Updating analytics buckets...")
+		if err := api.UpsertAnalyticsBuckets(ctx, utils.Config.Storage.AnalyticsBuckets.Buckets, prune); err != nil {
+			return err
+		}
+	}
+	if utils.Config.Storage.VectorBuckets.Enabled && len(projectRef) > 0 {
+		fmt.Fprintln(os.Stderr, "Updating vector buckets...")
+		if err := api.UpsertVectorBuckets(ctx, utils.Config.Storage.VectorBuckets.Buckets, prune); err != nil {
+			return err
+		}
+	}
 	return api.UpsertObjects(ctx, utils.Config.Storage.Buckets, utils.NewRootFS(fsys))
 }
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 230cc3e16..ee899f4ac 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -291,6 +291,8 @@ func (a *auth) Clone() auth {
 func (s *storage) Clone() storage {
 	copy := *s
 	copy.Buckets = maps.Clone(s.Buckets)
+	copy.AnalyticsBuckets.Buckets = maps.Clone(s.AnalyticsBuckets.Buckets)
+	copy.VectorBuckets.Buckets = maps.Clone(s.VectorBuckets.Buckets)
 	if s.ImageTransformation != nil {
 		img := *s.ImageTransformation
 		copy.ImageTransformation = &img
@@ -544,6 +546,15 @@ func (c *config) load(v *viper.Viper) error {
 	}); err != nil {
 		return errors.Errorf("failed to parse config: %w", err)
 	}
+	// Manually collect bucket names into sets: the bucket tables are empty, so only their keys matter
+	c.Storage.AnalyticsBuckets.Buckets = map[string]struct{}{}
+	for key := range v.GetStringMap("storage.analytics.buckets") {
+		c.Storage.AnalyticsBuckets.Buckets[key] = struct{}{}
+	}
+	c.Storage.VectorBuckets.Buckets = map[string]struct{}{}
+	for key := range v.GetStringMap("storage.vector.buckets") {
+		c.Storage.VectorBuckets.Buckets[key] = struct{}{}
+	}
 	// Convert keys to upper case: https://github.com/spf13/viper/issues/1014
 	secrets := make(SecretsConfig, len(c.EdgeRuntime.Secrets))
 	for k, v := range c.EdgeRuntime.Secrets {
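Aside: the manual key collection in pkg/config/config.go above exists because the bucket tables in config.toml are empty TOML tables, so only their names carry information. A minimal standalone sketch of how viper surfaces those names (not part of this change; the table names are illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	// Empty tables such as [storage.analytics.buckets.my-warehouse] hold no
	// key-value pairs; the change above keeps only the keys returned by
	// GetStringMap, building a map[string]struct{} set of bucket names.
	v := viper.New()
	v.SetConfigType("toml")
	cfg := `
[storage.analytics.buckets.my-warehouse]
[storage.analytics.buckets.events]
`
	if err := v.ReadConfig(strings.NewReader(cfg)); err != nil {
		panic(err)
	}
	names := map[string]struct{}{}
	for key := range v.GetStringMap("storage.analytics.buckets") {
		names[key] = struct{}{}
	}
	fmt.Println(names) // map[events:{} my-warehouse:{}]
}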
`toml:"buckets"` + } + s3Protocol struct { Enabled bool `toml:"enabled"` } @@ -68,10 +85,43 @@ func (s *storage) ToUpdateStorageConfigBody() v1API.UpdateStorageConfigBody { } // When local config is not set, we assume platform defaults should not change if s.ImageTransformation != nil { - body.Features.ImageTransformation.Enabled = s.ImageTransformation.Enabled + body.Features.ImageTransformation = &struct { + Enabled bool `json:"enabled"` + }{ + Enabled: s.ImageTransformation.Enabled, + } + } + // Disabling analytics and vector buckets means leaving platform values unchanged + if s.AnalyticsBuckets.Enabled { + body.Features.IcebergCatalog = &struct { + Enabled bool `json:"enabled"` + MaxCatalogs int `json:"maxCatalogs"` + MaxNamespaces int `json:"maxNamespaces"` + MaxTables int `json:"maxTables"` + }{ + Enabled: true, + MaxNamespaces: cast.UintToInt(s.AnalyticsBuckets.MaxNamespaces), + MaxTables: cast.UintToInt(s.AnalyticsBuckets.MaxTables), + MaxCatalogs: cast.UintToInt(s.AnalyticsBuckets.MaxCatalogs), + } + } + if s.VectorBuckets.Enabled { + body.Features.VectorBuckets = &struct { + Enabled bool `json:"enabled"` + MaxBuckets int `json:"maxBuckets"` + MaxIndexes int `json:"maxIndexes"` + }{ + Enabled: true, + MaxBuckets: cast.UintToInt(s.VectorBuckets.MaxBuckets), + MaxIndexes: cast.UintToInt(s.VectorBuckets.MaxIndexes), + } } if s.S3Protocol != nil { - body.Features.S3Protocol.Enabled = s.S3Protocol.Enabled + body.Features.S3Protocol = &struct { + Enabled bool `json:"enabled"` + }{ + Enabled: s.S3Protocol.Enabled, + } } return body } @@ -83,6 +133,17 @@ func (s *storage) FromRemoteStorageConfig(remoteConfig v1API.StorageConfigRespon if s.ImageTransformation != nil { s.ImageTransformation.Enabled = remoteConfig.Features.ImageTransformation.Enabled } + if s.AnalyticsBuckets.Enabled { + s.AnalyticsBuckets.Enabled = remoteConfig.Features.IcebergCatalog.Enabled + s.AnalyticsBuckets.MaxNamespaces = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxNamespaces) + s.AnalyticsBuckets.MaxTables = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxTables) + s.AnalyticsBuckets.MaxCatalogs = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxCatalogs) + } + if s.VectorBuckets.Enabled { + s.VectorBuckets.Enabled = remoteConfig.Features.VectorBuckets.Enabled + s.VectorBuckets.MaxBuckets = cast.IntToUint(remoteConfig.Features.VectorBuckets.MaxBuckets) + s.VectorBuckets.MaxIndexes = cast.IntToUint(remoteConfig.Features.VectorBuckets.MaxIndexes) + } if s.S3Protocol != nil { s.S3Protocol.Enabled = remoteConfig.Features.S3Protocol.Enabled } diff --git a/pkg/config/templates/config.toml b/pkg/config/templates/config.toml index 6a070de52..3fb0d0b54 100644 --- a/pkg/config/templates/config.toml +++ b/pkg/config/templates/config.toml @@ -105,13 +105,6 @@ enabled = true # The maximum file size allowed (e.g. "5MB", "500KB"). file_size_limit = "50MiB" -# Image transformation API is available to Supabase Pro plan. -# [storage.image_transformation] -# enabled = true - -# [storage.s3_protocol] -# enabled = false - # Uncomment to configure local storage buckets # [storage.buckets.images] # public = false @@ -119,6 +112,35 @@ file_size_limit = "50MiB" # allowed_mime_types = ["image/png", "image/jpeg"] # objects_path = "./images" +# Uncomment to allow connections via S3 compatible clients +# [storage.s3_protocol] +# enabled = true + +# Image transformation API is available to Supabase Pro plan. 
diff --git a/pkg/config/templates/config.toml b/pkg/config/templates/config.toml
index 6a070de52..3fb0d0b54 100644
--- a/pkg/config/templates/config.toml
+++ b/pkg/config/templates/config.toml
@@ -105,13 +105,6 @@ enabled = true
 # The maximum file size allowed (e.g. "5MB", "500KB").
 file_size_limit = "50MiB"
 
-# Image transformation API is available to Supabase Pro plan.
-# [storage.image_transformation]
-# enabled = true
-
-# [storage.s3_protocol]
-# enabled = false
-
 # Uncomment to configure local storage buckets
 # [storage.buckets.images]
 # public = false
@@ -119,6 +112,35 @@ file_size_limit = "50MiB"
 # allowed_mime_types = ["image/png", "image/jpeg"]
 # objects_path = "./images"
 
+# Uncomment to allow connections via S3 compatible clients
+# [storage.s3_protocol]
+# enabled = true
+
+# Image transformation API is available to Supabase Pro plan.
+# [storage.image_transformation]
+# enabled = true
+
+# Store analytical data in S3 for running ETL jobs over Iceberg Catalog
+# This feature is only available on the hosted platform.
+[storage.analytics]
+enabled = false
+max_namespaces = 5
+max_tables = 10
+max_catalogs = 2
+
+# Analytics Buckets is available to Supabase Pro plan.
+# [storage.analytics.buckets.my-warehouse]
+
+# Store vector embeddings in S3 for large and durable datasets
+# This feature is only available on the hosted platform.
+[storage.vector]
+enabled = false
+max_buckets = 10
+max_indexes = 5
+
+# Vector Buckets is available to Supabase Pro plan.
+# [storage.vector.buckets.documents-openai]
+
 [auth]
 enabled = true
 # The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
diff --git a/pkg/config/testdata/config.toml b/pkg/config/testdata/config.toml
index 7804e2df3..0b4974321 100644
--- a/pkg/config/testdata/config.toml
+++ b/pkg/config/testdata/config.toml
@@ -105,13 +105,6 @@ enabled = true
 # The maximum file size allowed (e.g. "5MB", "500KB").
 file_size_limit = "50MiB"
 
-# Image transformation API is available to Supabase Pro plan.
-[storage.image_transformation]
-enabled = true
-
-[storage.s3_protocol]
-enabled = true
-
 # Uncomment to configure local storage buckets
 [storage.buckets.images]
 public = false
@@ -119,6 +112,33 @@ file_size_limit = "50MiB"
 allowed_mime_types = ["image/png", "image/jpeg"]
 objects_path = "./images"
 
+# Uncomment to allow connections via S3 compatible clients
+[storage.s3_protocol]
+enabled = true
+
+# Image transformation API is available to Supabase Pro plan.
+[storage.image_transformation]
+enabled = true
+
+# Store analytical data in S3 for running ETL jobs over Iceberg Catalog
+[storage.analytics]
+enabled = true
+max_namespaces = 5
+max_tables = 10
+max_catalogs = 2
+
+# Analytics Buckets is available to Supabase Pro plan.
+[storage.analytics.buckets.my-warehouse]
+
+# Store vector embeddings in S3 for large and durable datasets
+[storage.vector]
+enabled = true
+max_buckets = 10
+max_indexes = 5
+
+# Vector Buckets is available to Supabase Pro plan.
+# [storage.vector.buckets.documents-openai]
+
 [auth]
 enabled = true
 # The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
diff --git a/pkg/storage/analytics.go b/pkg/storage/analytics.go
new file mode 100644
index 000000000..12f2abe01
--- /dev/null
+++ b/pkg/storage/analytics.go
@@ -0,0 +1,68 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"os"
+
+	"github.com/supabase/cli/pkg/fetcher"
+)
+
+type AnalyticsBucketResponse struct {
+	Id        string `json:"id"`         // "test"
+	Name      string `json:"name"`       // "test"
+	CreatedAt string `json:"created_at"` // "2023-10-13T17:48:58.491Z"
+	UpdatedAt string `json:"updated_at"` // "2023-10-13T17:48:58.491Z"
+}
+
+type CreateAnalyticsBucketRequest struct {
+	BucketName string `json:"bucketName"`
+}
+
+func (s *StorageAPI) UpsertAnalyticsBuckets(ctx context.Context, bucketConfig map[string]struct{}, filter ...func(string) bool) error {
+	resp, err := s.Send(ctx, http.MethodGet, "/storage/v1/iceberg/bucket", nil)
+	if err != nil {
+		return err
+	}
+	buckets, err := fetcher.ParseJSON[[]AnalyticsBucketResponse](resp.Body)
+	if err != nil {
+		return err
+	}
+	var toDelete []string
+	exists := make(map[string]struct{}, len(buckets))
+	for _, b := range buckets {
+		exists[b.Name] = struct{}{}
+		if _, ok := bucketConfig[b.Name]; !ok {
+			toDelete = append(toDelete, b.Name)
+		}
+	}
+	for name := range bucketConfig {
+		if _, ok := exists[name]; ok {
+			fmt.Fprintln(os.Stderr, "Bucket already exists:", name)
+			continue
+		}
+		fmt.Fprintln(os.Stderr, "Creating analytics bucket:", name)
+		body := CreateAnalyticsBucketRequest{BucketName: name}
+		if resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/iceberg/bucket", body); err != nil {
+			return err
+		} else if err := resp.Body.Close(); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+OUTER:
+	for _, name := range toDelete {
+		for _, keep := range filter {
+			if !keep(name) {
+				continue OUTER
+			}
+		}
+		fmt.Fprintln(os.Stderr, "Pruning analytics bucket:", name)
+		if resp, err := s.Send(ctx, http.MethodDelete, "/storage/v1/iceberg/bucket/"+name, nil); err != nil {
+			return err
+		} else if err := resp.Body.Close(); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+	return nil
+}
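Both new files implement the same reconciliation loop: list the remote buckets, create the ones declared only in config, and prune the ones present only remotely unless any filter vetoes the deletion. A distilled, standalone version of that control flow follows (plain slices, no HTTP; names here are illustrative only):

package main

import "fmt"

// reconcile mirrors the upsert pattern above: names in the desired config
// but missing remotely are created; remote names absent from the config are
// pruned only if every filter approves.
func reconcile(desired map[string]struct{}, remote []string, filter ...func(string) bool) (create, prune []string) {
	exists := make(map[string]struct{}, len(remote))
	var toDelete []string
	for _, name := range remote {
		exists[name] = struct{}{}
		if _, ok := desired[name]; !ok {
			toDelete = append(toDelete, name)
		}
	}
	for name := range desired {
		if _, ok := exists[name]; !ok {
			create = append(create, name)
		}
	}
OUTER:
	for _, name := range toDelete {
		for _, keep := range filter {
			if !keep(name) {
				continue OUTER // any filter can veto a deletion
			}
		}
		prune = append(prune, name)
	}
	return create, prune
}

func main() {
	desired := map[string]struct{}{"a": {}, "b": {}}
	create, prune := reconcile(desired, []string{"b", "c"}, func(string) bool { return true })
	fmt.Println(create, prune) // [a] [c]
}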
diff --git a/pkg/storage/vector.go b/pkg/storage/vector.go
new file mode 100644
index 000000000..7218f7b28
--- /dev/null
+++ b/pkg/storage/vector.go
@@ -0,0 +1,81 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"os"
+
+	"github.com/supabase/cli/pkg/fetcher"
+)
+
+type VectorBucket struct {
+	VectorBucketName string `json:"vectorBucketName"`
+	CreationTime     uint64 `json:"creationTime"`
+}
+
+type ListVectorBucketsResponse struct {
+	VectorBuckets []VectorBucket `json:"vectorBuckets"`
+}
+
+type ListVectorBucketsRequest struct {
+	MaxResults uint64 `json:"maxResults,omitempty"`
+	NextToken  string `json:"nextToken,omitempty"`
+	Prefix     string `json:"prefix,omitempty"`
+}
+
+type CreateVectorBucketRequest struct {
+	VectorBucketName string `json:"vectorBucketName"`
+}
+
+type DeleteVectorBucketRequest struct {
+	VectorBucketName string `json:"vectorBucketName"`
+}
+
+func (s *StorageAPI) UpsertVectorBuckets(ctx context.Context, bucketConfig map[string]struct{}, filter ...func(string) bool) error {
+	resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/vector/ListVectorBuckets", ListVectorBucketsRequest{})
+	if err != nil {
+		return err
+	}
+	result, err := fetcher.ParseJSON[ListVectorBucketsResponse](resp.Body)
+	if err != nil {
+		return err
+	}
+	var toDelete []string
+	exists := make(map[string]struct{}, len(result.VectorBuckets))
+	for _, b := range result.VectorBuckets {
+		exists[b.VectorBucketName] = struct{}{}
+		if _, ok := bucketConfig[b.VectorBucketName]; !ok {
+			toDelete = append(toDelete, b.VectorBucketName)
+		}
+	}
+	for name := range bucketConfig {
+		if _, ok := exists[name]; ok {
+			fmt.Fprintln(os.Stderr, "Bucket already exists:", name)
+			continue
+		}
+		fmt.Fprintln(os.Stderr, "Creating vector bucket:", name)
+		body := CreateVectorBucketRequest{VectorBucketName: name}
+		if resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/vector/CreateVectorBucket", body); err != nil {
+			return err
+		} else if err := resp.Body.Close(); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+OUTER:
+	for _, name := range toDelete {
+		for _, keep := range filter {
+			if !keep(name) {
+				continue OUTER
+			}
+		}
+		fmt.Fprintln(os.Stderr, "Pruning vector bucket:", name)
+		body := DeleteVectorBucketRequest{VectorBucketName: name}
+		if resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/vector/DeleteVectorBucket", body); err != nil {
+			return err
+		} else if err := resp.Body.Close(); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+	return nil
+}
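One behavioral note for callers of either upsert: the filter parameter is variadic, so omitting it prunes every remote-only bucket unconditionally; passing a filter is what makes the interactive prompt in internal/seed/buckets opt-in. A hypothetical caller, sketched under the assumption that a StorageAPI value is constructed elsewhere as in the seed command:

package example

import (
	"context"

	"github.com/supabase/cli/pkg/storage"
)

// seedVectorBuckets is an illustrative wrapper, not part of the diff; api
// and the bucket set would come from the surrounding program.
func seedVectorBuckets(ctx context.Context, api storage.StorageAPI) error {
	buckets := map[string]struct{}{"documents-openai": {}}
	// No filter passed: remote buckets absent from the set are pruned.
	if err := api.UpsertVectorBuckets(ctx, buckets); err != nil {
		return err
	}
	// With a filter, each deletion can be vetoed, e.g. via a user prompt.
	confirm := func(name string) bool { return false } // veto all pruning here
	return api.UpsertVectorBuckets(ctx, buckets, confirm)
}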