Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions cmd/apple_log_forwarder.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
package cmd

import (
"github.com/spf13/cobra"
"github.com/supabase/cli/internal/utils"
)

var (
	// Values bound to the command's required --container and --output flags.
	appleLogForwarderContainer string // id of the container whose logs are followed
	appleLogForwarderOutput    string // destination path for the JSONL log output

	// appleLogForwarderCmd is a hidden internal command that forwards a
	// container's Apple analytics logs via utils.RunAppleAnalyticsLogForwarder,
	// writing them to the configured output path.
	appleLogForwarderCmd = &cobra.Command{
		Use:    "apple-log-forwarder",
		Short:  "Internal Apple analytics log forwarder",
		Hidden: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			return utils.RunAppleAnalyticsLogForwarder(cmd.Context(), appleLogForwarderContainer, appleLogForwarderOutput)
		},
	}
)

// init wires up the hidden apple-log-forwarder command: it registers the two
// required flags and attaches the command to the root command.
func init() {
	cmdFlags := appleLogForwarderCmd.Flags()
	cmdFlags.StringVar(&appleLogForwarderContainer, "container", "", "container id to follow")
	cmdFlags.StringVar(&appleLogForwarderOutput, "output", "", "output path for JSONL logs")
	// Both flags are mandatory; registration failures are programmer errors.
	for _, name := range []string{"container", "output"} {
		cobra.CheckErr(appleLogForwarderCmd.MarkFlagRequired(name))
	}
	rootCmd.AddCommand(appleLogForwarderCmd)
}
6 changes: 4 additions & 2 deletions cmd/root.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"os"
"os/signal"
"strings"
"syscall"
"time"

"github.com/getsentry/sentry-go"
Expand Down Expand Up @@ -94,7 +95,7 @@ var (
}
cmd.SilenceUsage = true
// Load profile before changing workdir
ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt)
ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
fsys := afero.NewOsFs()
if err := utils.LoadProfile(ctx, fsys); err != nil {
return err
Expand Down Expand Up @@ -203,7 +204,7 @@ func recoverAndExit() {
!viper.GetBool("DEBUG") {
utils.CmdSuggestion = utils.SuggestDebugFlag
}
if e, ok := err.(*errors.Error); ok && len(utils.Version) == 0 {
if e, ok := err.(*errors.Error); ok && viper.GetBool("DEBUG") {
fmt.Fprintln(os.Stderr, string(e.Stack()))
}
msg = err.Error()
Expand Down Expand Up @@ -240,6 +241,7 @@ func init() {
flags.String("workdir", "", "path to a Supabase project directory")
flags.Bool("experimental", false, "enable experimental features")
flags.String("network-id", "", "use the specified docker network instead of a generated one")
flags.String("runtime", "", "container runtime for local development (docker|apple-container)")
flags.String("profile", "supabase", "use a specific profile for connecting to Supabase API")
flags.VarP(&utils.OutputFormat, "output", "o", "output format of status variables")
flags.Var(&utils.DNSResolver, "dns-resolver", "lookup domain names using the specified resolver")
Expand Down
2 changes: 2 additions & 0 deletions docs/supabase/start.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ Starts the Supabase local development stack.

Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`.

Use `--runtime` to override the local container runtime for the current command. To make it persistent for the project, set `[local].runtime` in `supabase/config.toml`.

All service containers are started by default. You can exclude those not needed by passing in `-x` flag. To exclude multiple containers, either pass in a comma separated string, such as `-x gotrue,imgproxy`, or specify `-x` flag multiple times.

> It is recommended to have at least 7GB of RAM to start all services.
Expand Down
2 changes: 2 additions & 0 deletions docs/supabase/status.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,6 @@ Shows status of the Supabase local development stack.

Requires the local development stack to be started by running `supabase start` or `supabase db start`.

The pretty output includes a runtime summary with the selected local runtime, project id, and tracked containers, networks, and volumes.

You can export the connection parameters for [initializing supabase-js](https://supabase.com/docs/reference/javascript/initializing) locally by specifying the `-o env` flag. Supported parameters include `JWT_SECRET`, `ANON_KEY`, and `SERVICE_ROLE_KEY`.
4 changes: 2 additions & 2 deletions docs/supabase/stop.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,6 @@ Stops the Supabase local development stack.

Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`.

All Docker resources are maintained across restarts. Use `--no-backup` flag to reset your local development data between restarts.
Local container resources are maintained across restarts for both the `docker` and `apple-container` runtimes. Use `--no-backup` flag to reset your local development data between restarts.

Use the `--all` flag to stop all local Supabase projects instances on the machine. Use with caution with `--no-backup` as it will delete all supabase local projects data.
Use the `--all` flag to stop all local Supabase project instances on the machine. Use it with caution together with `--no-backup`, as that will delete all local Supabase project data.
119 changes: 98 additions & 21 deletions internal/db/reset/reset.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,31 +5,47 @@ import (
_ "embed"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"time"

"github.com/cenkalti/backoff/v4"
"github.com/containerd/errdefs"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/go-errors/errors"
"github.com/jackc/pgconn"
"github.com/jackc/pgerrcode"
"github.com/jackc/pgx/v4"
"github.com/spf13/afero"
"github.com/supabase/cli/internal/db/start"
dbstart "github.com/supabase/cli/internal/db/start"
"github.com/supabase/cli/internal/migration/apply"
"github.com/supabase/cli/internal/migration/down"
"github.com/supabase/cli/internal/migration/list"
"github.com/supabase/cli/internal/migration/repair"
"github.com/supabase/cli/internal/seed/buckets"
stackstart "github.com/supabase/cli/internal/start"
"github.com/supabase/cli/internal/utils"
"github.com/supabase/cli/pkg/migration"
)

// Package-level indirections over utils/dbstart/stackstart helpers so that
// unit tests can stub container, database, and seeding operations without a
// real container runtime.
var (
	assertSupabaseDbIsRunning = utils.AssertSupabaseDbIsRunning
	removeContainer           = utils.RemoveContainer
	removeVolume              = utils.RemoveVolume
	startContainer            = utils.DockerStart
	inspectContainer          = utils.InspectContainer
	restartContainer          = utils.RestartContainer
	waitForHealthyService     = dbstart.WaitForHealthyService
	waitForLocalDatabase      = waitForDatabaseReady
	waitForLocalAPI           = waitForAPIReady
	setupLocalDatabase        = dbstart.SetupLocalDatabase
	restartKong               = stackstart.RestartKong
	runBucketSeed             = buckets.Run
	// seedBuckets wraps buckets.Run with backoff retries (see seedBucketsWithRetry).
	seedBuckets = seedBucketsWithRetry
)

func Run(ctx context.Context, version string, last uint, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
if len(version) > 0 {
if _, err := strconv.Atoi(version); err != nil {
Expand All @@ -54,21 +70,38 @@ func Run(ctx context.Context, version string, last uint, config pgconn.Config, f
return resetRemote(ctx, version, config, fsys, options...)
}
// Config file is loaded before parsing --linked or --local flags
if err := utils.AssertSupabaseDbIsRunning(); err != nil {
if err := assertSupabaseDbIsRunning(); err != nil {
return err
}
// Reset postgres database because extensions (pg_cron, pg_net) require postgres
if err := resetDatabase(ctx, version, fsys, options...); err != nil {
return err
}
// Seed objects from supabase/buckets directory
if resp, err := utils.Docker.ContainerInspect(ctx, utils.StorageId); err == nil {
if resp.State.Health == nil || resp.State.Health.Status != types.Healthy {
if err := start.WaitForHealthyService(ctx, 30*time.Second, utils.StorageId); err != nil {
if _, err := inspectContainer(ctx, utils.StorageId); err == nil {
if shouldRefreshAPIAfterReset() {
// Kong caches upstream addresses; recreate it after the db container gets a new IP.
if err := restartKong(ctx, stackstart.KongDependencies{
Gotrue: utils.Config.Auth.Enabled,
Rest: utils.Config.Api.Enabled,
Realtime: utils.Config.Realtime.Enabled,
Storage: utils.Config.Storage.Enabled,
Studio: utils.Config.Studio.Enabled,
Pgmeta: utils.Config.Studio.Enabled,
Edge: true,
Logflare: utils.Config.Analytics.Enabled,
Pooler: utils.Config.Db.Pooler.Enabled,
}); err != nil {
return err
}
if err := waitForLocalAPI(ctx, 30*time.Second); err != nil {
return err
}
}
if err := buckets.Run(ctx, "", false, fsys); err != nil {
if err := waitForHealthyService(ctx, 30*time.Second, utils.StorageId); err != nil {
return err
}
if err := seedBuckets(ctx, fsys); err != nil {
return err
}
}
Expand All @@ -77,6 +110,13 @@ func Run(ctx context.Context, version string, last uint, config pgconn.Config, f
return nil
}

// shouldRefreshAPIAfterReset reports whether Kong needs to be recreated once
// the database has been reset. On the apple-container runtime the replacement
// database container receives a new dynamic IP, so the upstream addresses Kong
// has cached become stale.
func shouldRefreshAPIAfterReset() bool {
	if !utils.UsesAppleContainerRuntime() {
		return false
	}
	return utils.Config.Api.Enabled
}

func resetDatabase(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
fmt.Fprintln(os.Stderr, "Resetting local database"+toLogMessage(version))
if utils.Config.Db.MajorVersion <= 14 {
Expand Down Expand Up @@ -111,14 +151,14 @@ func resetDatabase14(ctx context.Context, version string, fsys afero.Fs, options
}

func resetDatabase15(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
if err := utils.Docker.ContainerRemove(ctx, utils.DbId, container.RemoveOptions{Force: true}); err != nil {
if err := removeContainer(ctx, utils.DbId, true, true); err != nil {
return errors.Errorf("failed to remove container: %w", err)
}
if err := utils.Docker.VolumeRemove(ctx, utils.DbId, true); err != nil {
if err := removeVolume(ctx, utils.DbId, true); err != nil {
return errors.Errorf("failed to remove volume: %w", err)
}
config := start.NewContainerConfig()
hostConfig := start.NewHostConfig()
config := dbstart.NewContainerConfig()
hostConfig := dbstart.NewHostConfig()
networkingConfig := network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
utils.NetId: {
Expand All @@ -127,13 +167,16 @@ func resetDatabase15(ctx context.Context, version string, fsys afero.Fs, options
},
}
fmt.Fprintln(os.Stderr, "Recreating database...")
if _, err := utils.DockerStart(ctx, config, hostConfig, networkingConfig, utils.DbId); err != nil {
if _, err := startContainer(ctx, config, hostConfig, networkingConfig, utils.DbId); err != nil {
return err
}
if err := start.WaitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
if err := waitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
return err
}
if err := start.SetupLocalDatabase(ctx, version, fsys, os.Stderr, options...); err != nil {
if err := waitForLocalDatabase(ctx, utils.Config.Db.HealthTimeout, options...); err != nil {
return err
}
if err := setupLocalDatabase(ctx, version, fsys, os.Stderr, options...); err != nil {
return err
}
fmt.Fprintln(os.Stderr, "Restarting containers...")
Expand All @@ -146,7 +189,7 @@ func initDatabase(ctx context.Context, options ...func(*pgx.ConnConfig)) error {
return err
}
defer conn.Close(context.Background())
return start.InitSchema14(ctx, conn)
return dbstart.InitSchema14(ctx, conn)
}

// Recreate postgres database by connecting to template1
Expand Down Expand Up @@ -193,7 +236,7 @@ func DisconnectClients(ctx context.Context, conn *pgx.Conn) error {
}
}
// Wait for WAL senders to drop their replication slots
policy := start.NewBackoffPolicy(ctx, 10*time.Second)
policy := dbstart.NewBackoffPolicy(ctx, 10*time.Second)
waitForDrop := func() error {
var count int
if err := conn.QueryRow(ctx, COUNT_REPLICATION_SLOTS).Scan(&count); err != nil {
Expand All @@ -211,20 +254,50 @@ func RestartDatabase(ctx context.Context, w io.Writer) error {
fmt.Fprintln(w, "Restarting containers...")
// Some extensions must be manually restarted after pg_terminate_backend
// Ref: https://github.com/citusdata/pg_cron/issues/99
if err := utils.Docker.ContainerRestart(ctx, utils.DbId, container.StopOptions{}); err != nil {
if err := restartContainer(ctx, utils.DbId); err != nil {
return errors.Errorf("failed to restart container: %w", err)
}
if err := start.WaitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
if err := waitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
return err
}
return restartServices(ctx)
}

// waitForDatabaseReady polls the local Postgres instance until a connection
// can be opened and cleanly closed, retrying with backoff for up to timeout.
func waitForDatabaseReady(ctx context.Context, timeout time.Duration, options ...func(*pgx.ConnConfig)) error {
	probe := func() error {
		conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...)
		if err != nil {
			return err
		}
		// A successful close confirms the round trip worked end to end.
		return conn.Close(ctx)
	}
	return backoff.Retry(probe, dbstart.NewBackoffPolicy(ctx, timeout))
}

// seedBucketsWithRetry seeds storage buckets from the project directory,
// retrying with backoff for up to 30 seconds to ride out a storage service
// that is still coming up.
func seedBucketsWithRetry(ctx context.Context, fsys afero.Fs) error {
	seed := func() error {
		return runBucketSeed(ctx, "", false, fsys)
	}
	return backoff.Retry(seed, dbstart.NewBackoffPolicy(ctx, 30*time.Second))
}

// waitForAPIReady blocks until the local API gateway accepts TCP connections
// on the configured hostname and port, retrying with backoff for up to
// timeout. Returns the last dial error if the gateway never becomes reachable.
func waitForAPIReady(ctx context.Context, timeout time.Duration) error {
	addr := net.JoinHostPort(utils.Config.Hostname, strconv.FormatUint(uint64(utils.Config.Api.Port), 10))
	// Use DialContext so an in-flight dial is aborted as soon as ctx is
	// cancelled; net.DialTimeout would keep blocking for its full timeout.
	dialer := net.Dialer{Timeout: time.Second}
	policy := dbstart.NewBackoffPolicy(ctx, timeout)
	return backoff.Retry(func() error {
		conn, err := dialer.DialContext(ctx, "tcp", addr)
		if err != nil {
			return err
		}
		return conn.Close()
	}, policy)
}

func restartServices(ctx context.Context) error {
// No need to restart PostgREST because it automatically reconnects and listens for schema changes
services := listServicesToRestart()
result := utils.WaitAll(services, func(id string) error {
if err := utils.Docker.ContainerRestart(ctx, id, container.StopOptions{}); err != nil && !errdefs.IsNotFound(err) {
if err := restartContainer(ctx, id); err != nil && !errdefs.IsNotFound(err) {
return errors.Errorf("failed to restart %s: %w", id, err)
}
return nil
Expand All @@ -233,8 +306,12 @@ func restartServices(ctx context.Context) error {
return errors.Join(result...)
}

// listServicesToRestart enumerates the containers that must be restarted once
// the database has been reset. Kong is on the list because it caches upstream
// addresses, and those can go stale when the database container is recreated
// (notably on Apple containers, which assign dynamic IPs).
func listServicesToRestart() []string {
	return []string{
		utils.StorageId,
		utils.GotrueId,
		utils.RealtimeId,
		utils.PoolerId,
		utils.KongId,
	}
}

func resetRemote(ctx context.Context, version string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
Expand Down
Loading
Loading