Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions internal/seed/buckets/buckets.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package buckets
import (
"context"
"fmt"
"os"

"github.com/spf13/afero"
"github.com/supabase/cli/internal/storage/client"
Expand All @@ -29,5 +30,25 @@ func Run(ctx context.Context, projectRef string, interactive bool, fsys afero.Fs
if err := api.UpsertBuckets(ctx, utils.Config.Storage.Buckets, filter); err != nil {
return err
}
prune := func(name string) bool {
label := fmt.Sprintf("Bucket %s not found in %s. Do you want to prune it?", utils.Bold(name), utils.Bold(utils.ConfigPath))
shouldPrune, err := console.PromptYesNo(ctx, label, false)
if err != nil {
fmt.Fprintln(utils.GetDebugLogger(), err)
}
return shouldPrune
}
if utils.Config.Storage.AnalyticsBuckets.Enabled && len(projectRef) > 0 {
fmt.Fprintln(os.Stderr, "Updating analytics buckets...")
if err := api.UpsertAnalyticsBuckets(ctx, utils.Config.Storage.AnalyticsBuckets.Buckets, prune); err != nil {
return err
}
}
if utils.Config.Storage.VectorBuckets.Enabled && len(projectRef) > 0 {
fmt.Fprintln(os.Stderr, "Updating vector buckets...")
if err := api.UpsertVectorBuckets(ctx, utils.Config.Storage.VectorBuckets.Buckets, prune); err != nil {
return err
}
}
return api.UpsertObjects(ctx, utils.Config.Storage.Buckets, utils.NewRootFS(fsys))
}
11 changes: 11 additions & 0 deletions pkg/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,8 @@ func (a *auth) Clone() auth {
func (s *storage) Clone() storage {
copy := *s
copy.Buckets = maps.Clone(s.Buckets)
copy.AnalyticsBuckets.Buckets = maps.Clone(s.AnalyticsBuckets.Buckets)
copy.VectorBuckets.Buckets = maps.Clone(s.VectorBuckets.Buckets)
if s.ImageTransformation != nil {
img := *s.ImageTransformation
copy.ImageTransformation = &img
Expand Down Expand Up @@ -544,6 +546,15 @@ func (c *config) load(v *viper.Viper) error {
}); err != nil {
return errors.Errorf("failed to parse config: %w", err)
}
// Manually parse config to map
c.Storage.AnalyticsBuckets.Buckets = map[string]struct{}{}
for key := range v.GetStringMap("storage.analytics.buckets") {
c.Storage.AnalyticsBuckets.Buckets[key] = struct{}{}
}
c.Storage.VectorBuckets.Buckets = map[string]struct{}{}
for key := range v.GetStringMap("storage.vector.buckets") {
c.Storage.VectorBuckets.Buckets[key] = struct{}{}
}
// Convert keys to upper case: https://github.com/spf13/viper/issues/1014
secrets := make(SecretsConfig, len(c.EdgeRuntime.Secrets))
for k, v := range c.EdgeRuntime.Secrets {
Expand Down
65 changes: 63 additions & 2 deletions pkg/config/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,29 @@ type (
S3Protocol *s3Protocol `toml:"s3_protocol"`
S3Credentials storageS3Credentials `toml:"-"`
Buckets BucketConfig `toml:"buckets"`
AnalyticsBuckets analyticsBuckets `toml:"analytics"`
VectorBuckets vectorBuckets `toml:"vector"`
}

// imageTransformation toggles the storage image transformation feature
// ([storage.image_transformation] in config.toml).
imageTransformation struct {
	Enabled bool `toml:"enabled"`
}

// analyticsBuckets maps the [storage.analytics] section of config.toml:
// Iceberg-backed analytics buckets plus their quota limits.
analyticsBuckets struct {
	Enabled       bool `toml:"enabled"`
	MaxNamespaces uint `toml:"max_namespaces"`
	MaxTables     uint `toml:"max_tables"`
	MaxCatalogs   uint `toml:"max_catalogs"`
	// Buckets is a set of bucket names; keys come from the
	// [storage.analytics.buckets.<name>] tables (populated manually in
	// config load, not by the TOML decoder).
	Buckets map[string]struct{} `toml:"buckets"`
}

// vectorBuckets maps the [storage.vector] section of config.toml:
// vector-embedding buckets plus their quota limits.
vectorBuckets struct {
	Enabled    bool `toml:"enabled"`
	MaxBuckets uint `toml:"max_buckets"`
	MaxIndexes uint `toml:"max_indexes"`
	// Buckets is a set of bucket names; keys come from the
	// [storage.vector.buckets.<name>] tables (populated manually in
	// config load, not by the TOML decoder).
	Buckets map[string]struct{} `toml:"buckets"`
}

// s3Protocol toggles S3-compatible client access to storage
// ([storage.s3_protocol] in config.toml).
s3Protocol struct {
	Enabled bool `toml:"enabled"`
}
Expand Down Expand Up @@ -68,10 +85,43 @@ func (s *storage) ToUpdateStorageConfigBody() v1API.UpdateStorageConfigBody {
}
// When local config is not set, we assume platform defaults should not change
if s.ImageTransformation != nil {
body.Features.ImageTransformation.Enabled = s.ImageTransformation.Enabled
body.Features.ImageTransformation = &struct {
Enabled bool `json:"enabled"`
}{
Enabled: s.ImageTransformation.Enabled,
}
}
// Disabling analytics and vector buckets means leaving platform values unchanged
if s.AnalyticsBuckets.Enabled {
body.Features.IcebergCatalog = &struct {
Enabled bool `json:"enabled"`
MaxCatalogs int `json:"maxCatalogs"`
MaxNamespaces int `json:"maxNamespaces"`
MaxTables int `json:"maxTables"`
}{
Enabled: true,
MaxNamespaces: cast.UintToInt(s.AnalyticsBuckets.MaxNamespaces),
MaxTables: cast.UintToInt(s.AnalyticsBuckets.MaxTables),
MaxCatalogs: cast.UintToInt(s.AnalyticsBuckets.MaxCatalogs),
}
}
if s.VectorBuckets.Enabled {
body.Features.VectorBuckets = &struct {
Enabled bool `json:"enabled"`
MaxBuckets int `json:"maxBuckets"`
MaxIndexes int `json:"maxIndexes"`
}{
Enabled: true,
MaxBuckets: cast.UintToInt(s.VectorBuckets.MaxBuckets),
MaxIndexes: cast.UintToInt(s.VectorBuckets.MaxIndexes),
}
}
if s.S3Protocol != nil {
body.Features.S3Protocol.Enabled = s.S3Protocol.Enabled
body.Features.S3Protocol = &struct {
Enabled bool `json:"enabled"`
}{
Enabled: s.S3Protocol.Enabled,
}
}
return body
}
Expand All @@ -83,6 +133,17 @@ func (s *storage) FromRemoteStorageConfig(remoteConfig v1API.StorageConfigRespon
if s.ImageTransformation != nil {
s.ImageTransformation.Enabled = remoteConfig.Features.ImageTransformation.Enabled
}
if s.AnalyticsBuckets.Enabled {
s.AnalyticsBuckets.Enabled = remoteConfig.Features.IcebergCatalog.Enabled
s.AnalyticsBuckets.MaxNamespaces = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxNamespaces)
s.AnalyticsBuckets.MaxTables = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxTables)
s.AnalyticsBuckets.MaxCatalogs = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxCatalogs)
}
if s.VectorBuckets.Enabled {
s.VectorBuckets.Enabled = remoteConfig.Features.VectorBuckets.Enabled
s.VectorBuckets.MaxBuckets = cast.IntToUint(remoteConfig.Features.VectorBuckets.MaxBuckets)
s.VectorBuckets.MaxIndexes = cast.IntToUint(remoteConfig.Features.VectorBuckets.MaxIndexes)
}
if s.S3Protocol != nil {
s.S3Protocol.Enabled = remoteConfig.Features.S3Protocol.Enabled
}
Expand Down
36 changes: 29 additions & 7 deletions pkg/config/templates/config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -105,20 +105,42 @@ enabled = true
# The maximum file size allowed (e.g. "5MB", "500KB").
file_size_limit = "50MiB"

# Image transformation API is available to Supabase Pro plan.
# [storage.image_transformation]
# enabled = true

# [storage.s3_protocol]
# enabled = false

# Uncomment to configure local storage buckets
# [storage.buckets.images]
# public = false
# file_size_limit = "50MiB"
# allowed_mime_types = ["image/png", "image/jpeg"]
# objects_path = "./images"

# Uncomment to allow connections via S3 compatible clients
# [storage.s3_protocol]
# enabled = true

# Image transformation API is available to Supabase Pro plan.
# [storage.image_transformation]
# enabled = true

# Store analytical data in S3 for running ETL jobs over Iceberg Catalog
# This feature is only available on the hosted platform.
[storage.analytics]
enabled = false
max_namespaces = 5
max_tables = 10
max_catalogs = 2

# Analytics buckets are available on the Supabase Pro plan.
# [storage.analytics.buckets.my-warehouse]

# Store vector embeddings in S3 for large and durable datasets
# This feature is only available on the hosted platform.
[storage.vector]
enabled = false
max_buckets = 10
max_indexes = 5

# Vector buckets are available on the Supabase Pro plan.
# [storage.vector.buckets.documents-openai]

[auth]
enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
Expand Down
34 changes: 27 additions & 7 deletions pkg/config/testdata/config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -105,20 +105,40 @@ enabled = true
# The maximum file size allowed (e.g. "5MB", "500KB").
file_size_limit = "50MiB"

# Image transformation API is available to Supabase Pro plan.
[storage.image_transformation]
enabled = true

[storage.s3_protocol]
enabled = true

# Uncomment to configure local storage buckets
[storage.buckets.images]
public = false
file_size_limit = "50MiB"
allowed_mime_types = ["image/png", "image/jpeg"]
objects_path = "./images"

# Uncomment to allow connections via S3 compatible clients
[storage.s3_protocol]
enabled = true

# Image transformation API is available to Supabase Pro plan.
[storage.image_transformation]
enabled = true

# Store analytical data in S3 for running ETL jobs over Iceberg Catalog
[storage.analytics]
enabled = true
max_namespaces = 5
max_tables = 10
max_catalogs = 2

# Analytics buckets are available on the Supabase Pro plan.
[storage.analytics.buckets.my-warehouse]

# Store vector embeddings in S3 for large and durable datasets
[storage.vector]
enabled = true
max_buckets = 10
max_indexes = 5

# Vector buckets are available on the Supabase Pro plan.
# [storage.vector.buckets.documents-openai]

[auth]
enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
Expand Down
68 changes: 68 additions & 0 deletions pkg/storage/analytics.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
package storage

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"os"

	"github.com/supabase/cli/pkg/fetcher"
)

// AnalyticsBucketResponse models one analytics (Iceberg) bucket as returned
// by GET /storage/v1/iceberg/bucket.
type AnalyticsBucketResponse struct {
	Id        string `json:"id"`         // "test"
	Name      string `json:"name"`       // "test"
	CreatedAt string `json:"created_at"` // "2023-10-13T17:48:58.491Z"
	UpdatedAt string `json:"updated_at"` // "2023-10-13T17:48:58.491Z"
}

// CreateAnalyticsBucketRequest is the body for
// POST /storage/v1/iceberg/bucket.
type CreateAnalyticsBucketRequest struct {
	BucketName string `json:"bucketName"`
}

// UpsertAnalyticsBuckets reconciles remote analytics (Iceberg) buckets with
// the desired set in bucketConfig: buckets missing remotely are created, and
// remote buckets absent from bucketConfig become prune candidates. A
// candidate is only deleted when every filter returns true for its name;
// any filter returning false skips the deletion. Progress is logged to
// stderr. Returns the first request error encountered.
func (s *StorageAPI) UpsertAnalyticsBuckets(ctx context.Context, bucketConfig map[string]struct{}, filter ...func(string) bool) error {
	resp, err := s.Send(ctx, http.MethodGet, "/storage/v1/iceberg/bucket", nil)
	if err != nil {
		return err
	}
	// Close the list response body so the underlying connection can be
	// reused; the original leaked it.
	defer resp.Body.Close()
	buckets, err := fetcher.ParseJSON[[]AnalyticsBucketResponse](resp.Body)
	if err != nil {
		return err
	}
	// Partition remote buckets: names present in config are kept, the rest
	// become prune candidates.
	var toDelete []string
	exists := make(map[string]struct{}, len(buckets))
	for _, b := range buckets {
		exists[b.Name] = struct{}{}
		if _, ok := bucketConfig[b.Name]; !ok {
			toDelete = append(toDelete, b.Name)
		}
	}
	// Create buckets declared in config but missing remotely.
	for name := range bucketConfig {
		if _, ok := exists[name]; ok {
			fmt.Fprintln(os.Stderr, "Bucket already exists:", name)
			continue
		}
		fmt.Fprintln(os.Stderr, "Creating analytics bucket:", name)
		body := CreateAnalyticsBucketRequest{BucketName: name}
		if resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/iceberg/bucket", body); err != nil {
			return err
		} else if err := resp.Body.Close(); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
OUTER:
	for _, name := range toDelete {
		for _, keep := range filter {
			if !keep(name) {
				continue OUTER
			}
		}
		fmt.Fprintln(os.Stderr, "Pruning analytics bucket:", name)
		// Escape the name so arbitrary bucket names cannot break the request path.
		if resp, err := s.Send(ctx, http.MethodDelete, "/storage/v1/iceberg/bucket/"+url.PathEscape(name), nil); err != nil {
			return err
		} else if err := resp.Body.Close(); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
	return nil
}
Loading