fix: squash auth and storage schema changes (#2027)
* fix: squash auth and storage schema changes

* chore: update unit tests
sweatybridge committed Mar 7, 2024
1 parent eb08493 commit c444a62
Showing 6 changed files with 345 additions and 14 deletions.
8 changes: 2 additions & 6 deletions internal/db/diff/diff.go
@@ -118,7 +118,7 @@ func CreateShadowDatabase(ctx context.Context) (string, error) {
return utils.DockerStart(ctx, config, hostConfig, networkingConfig, "")
}

func connectShadowDatabase(ctx context.Context, timeout time.Duration, options ...func(*pgx.ConnConfig)) (conn *pgx.Conn, err error) {
func ConnectShadowDatabase(ctx context.Context, timeout time.Duration, options ...func(*pgx.ConnConfig)) (conn *pgx.Conn, err error) {
// Retry until connected, cancelled, or timeout
policy := backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), uint64(timeout.Seconds()))
config := pgconn.Config{Port: uint16(utils.Config.Db.ShadowPort)}
@@ -133,11 +133,7 @@ func MigrateShadowDatabase(ctx context.Context, container string, fsys afero.Fs,
if err != nil {
return err
}
return MigrateShadowDatabaseVersions(ctx, container, migrations, fsys, options...)
}

func MigrateShadowDatabaseVersions(ctx context.Context, container string, migrations []string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
conn, err := connectShadowDatabase(ctx, 10*time.Second, options...)
conn, err := ConnectShadowDatabase(ctx, 10*time.Second, options...)
if err != nil {
return err
}
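The retry loop inside the newly exported ConnectShadowDatabase is worth seeing in isolation. Here is a minimal, self-contained sketch of the same pattern: one connection attempt per second until it succeeds, the context is cancelled, or the timeout elapses. It assumes the backoff calls in the hunk come from github.com/cenkalti/backoff/v4 (the import list is not shown in this diff), and the DSN is a hypothetical local shadow database.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
	"github.com/jackc/pgx/v4"
)

// connectWithRetry dials dsn once per second until it succeeds, the context
// is cancelled, or roughly timeout seconds' worth of attempts are exhausted.
func connectWithRetry(ctx context.Context, dsn string, timeout time.Duration) (*pgx.Conn, error) {
	var conn *pgx.Conn
	policy := backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), uint64(timeout.Seconds()))
	connect := func() error {
		var err error
		conn, err = pgx.Connect(ctx, dsn)
		return err
	}
	if err := backoff.Retry(connect, backoff.WithContext(policy, ctx)); err != nil {
		return nil, err
	}
	return conn, nil
}

func main() {
	// Hypothetical DSN; the real code builds it from utils.Config.Db.ShadowPort.
	dsn := "postgres://postgres:postgres@127.0.0.1:54320/postgres"
	conn, err := connectWithRetry(context.Background(), dsn, 10*time.Second)
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer conn.Close(context.Background())
	fmt.Println("connected")
}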
69 changes: 61 additions & 8 deletions internal/migration/squash/squash.go
@@ -1,18 +1,24 @@
package squash

import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"time"

"github.com/go-errors/errors"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/spf13/afero"
"github.com/supabase/cli/internal/db/diff"
"github.com/supabase/cli/internal/db/dump"
"github.com/supabase/cli/internal/db/start"
"github.com/supabase/cli/internal/migration/apply"
"github.com/supabase/cli/internal/migration/history"
"github.com/supabase/cli/internal/migration/list"
"github.com/supabase/cli/internal/migration/repair"
@@ -79,8 +85,32 @@ func squashMigrations(ctx context.Context, migrations []string, fsys afero.Fs, o
return err
}
defer utils.DockerRemove(shadow)
conn, err := diff.ConnectShadowDatabase(ctx, 10*time.Second, options...)
if err != nil {
return err
}
defer conn.Close(context.Background())
if err := start.SetupDatabase(ctx, conn, shadow[:12], os.Stderr, fsys); err != nil {
return err
}
// Assuming entities in managed schemas are not altered, we can simply diff the dumps before and after migrations.
schemas := []string{"auth", "storage"}
config := pgconn.Config{
Host: utils.Config.Hostname,
Port: uint16(utils.Config.Db.ShadowPort),
User: "postgres",
Password: utils.Config.Db.Password,
Database: "postgres",
}
var before, after bytes.Buffer
if err := dump.DumpSchema(ctx, config, schemas, false, false, &before); err != nil {
return err
}
// 2. Migrate to target version
if err := diff.MigrateShadowDatabaseVersions(ctx, shadow, migrations, fsys, options...); err != nil {
if err := apply.MigrateUp(ctx, conn, migrations, fsys); err != nil {
return err
}
if err := dump.DumpSchema(ctx, config, schemas, false, false, &after); err != nil {
return err
}
// 3. Dump migrated schema
@@ -90,14 +120,37 @@ func squashMigrations(ctx context.Context, migrations []string, fsys afero.Fs, o
return errors.Errorf("failed to open migration file: %w", err)
}
defer f.Close()
config := pgconn.Config{
Host: utils.Config.Hostname,
Port: uint16(utils.Config.Db.ShadowPort),
User: "postgres",
Password: utils.Config.Db.Password,
Database: "postgres",
if err := dump.DumpSchema(ctx, config, nil, false, false, f); err != nil {
return err
}
return dump.DumpSchema(ctx, config, nil, false, false, f)
// 4. Append managed schema diffs
fmt.Fprint(f, separatorComment)
return lineByLineDiff(&before, &after, f)
}

const separatorComment = `
--
-- Dumped schema changes for auth and storage
--
`

func lineByLineDiff(before, after io.Reader, f io.Writer) error {
anchor := bufio.NewScanner(before)
anchor.Scan()
// Assuming before is always a subset of after
scanner := bufio.NewScanner(after)
for scanner.Scan() {
line := scanner.Text()
if line == anchor.Text() {
anchor.Scan()
continue
}
if _, err := fmt.Fprintln(f, line); err != nil {
return errors.Errorf("failed to write line: %w", err)
}
}
return nil
}

func baselineMigrations(ctx context.Context, config pgconn.Config, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
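Aside from the pgconn wiring, the new lineByLineDiff helper is pure standard library, so step 4 can be reproduced end to end in a standalone program. A minimal sketch, with the helper copied from the hunk above (the only substitution is fmt.Errorf for the go-errors wrapper) and fed the in-memory inputs from the first TestLineByLine edge case below:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"strings"
)

const separatorComment = `
--
-- Dumped schema changes for auth and storage
--
`

// lineByLineDiff writes every line of after that the anchor scanner over
// before fails to match, in order. It assumes before is a subset of after.
func lineByLineDiff(before, after io.Reader, f io.Writer) error {
	anchor := bufio.NewScanner(before)
	anchor.Scan()
	scanner := bufio.NewScanner(after)
	for scanner.Scan() {
		line := scanner.Text()
		if line == anchor.Text() {
			anchor.Scan() // advance the anchor only on an exact match
			continue
		}
		if _, err := fmt.Fprintln(f, line); err != nil {
			return fmt.Errorf("failed to write line: %w", err)
		}
	}
	return nil
}

func main() {
	// Stand-ins for the before/after pg_dump output of the managed schemas.
	before := strings.NewReader("select 1;")
	after := strings.NewReader("select 0;\nselect 1;\nselect 2;")
	var out bytes.Buffer
	out.WriteString(separatorComment)
	if err := lineByLineDiff(before, after, &out); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Prints the separator comment followed by "select 0;" and "select 2;".
	os.Stdout.Write(out.Bytes())
}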
64 changes: 64 additions & 0 deletions internal/migration/squash/squash_test.go
@@ -1,12 +1,15 @@
package squash

import (
"bytes"
"context"
"embed"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"testing"

"github.com/jackc/pgconn"
@@ -57,6 +60,10 @@ func TestSquashCommand(t *testing.T) {
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", ""))
apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Pg15Image), "test-db")
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql))
apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Pg15Image), "test-db")
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql))
apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Pg15Image), "test-db")
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql))
// Setup mock postgres
conn := pgtest.NewConn()
defer conn.Close(t)
Expand Down Expand Up @@ -227,6 +234,10 @@ func TestSquashMigrations(t *testing.T) {
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-storage", ""))
apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.GotrueImage), "test-auth")
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-auth", ""))
apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Pg15Image), "test-db")
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql))
apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Pg15Image), "test-db")
require.NoError(t, apitest.MockDockerLogs(utils.Docker, "test-db", sql))
// Setup mock postgres
conn := pgtest.NewConn()
defer conn.Close(t)
Expand Down Expand Up @@ -309,3 +320,56 @@ func TestBaselineMigration(t *testing.T) {
assert.ErrorIs(t, err, os.ErrNotExist)
})
}

//go:embed testdata/*.sql
var testdata embed.FS

func TestLineByLine(t *testing.T) {
t.Run("diffs output from pg_dump", func(t *testing.T) {
before, err := testdata.Open("testdata/before.sql")
require.NoError(t, err)
after, err := testdata.Open("testdata/after.sql")
require.NoError(t, err)
expected, err := testdata.ReadFile("testdata/diff.sql")
require.NoError(t, err)
// Run test
var out bytes.Buffer
err = lineByLineDiff(before, after, &out)
// Check error
assert.NoError(t, err)
assert.Equal(t, expected, out.Bytes())
})

t.Run("diffs shorter before", func(t *testing.T) {
before := strings.NewReader("select 1;")
after := strings.NewReader("select 0;\nselect 1;\nselect 2;")
// Run test
var out bytes.Buffer
err := lineByLineDiff(before, after, &out)
// Check error
assert.NoError(t, err)
assert.Equal(t, "select 0;\nselect 2;\n", out.String())
})

t.Run("diffs shorter after", func(t *testing.T) {
before := strings.NewReader("select 1;\nselect 2;")
after := strings.NewReader("select 1;")
// Run test
var out bytes.Buffer
err := lineByLineDiff(before, after, &out)
// Check error
assert.NoError(t, err)
assert.Equal(t, "", out.String())
})

t.Run("diffs no match", func(t *testing.T) {
before := strings.NewReader("select 0;\nselect 1;")
after := strings.NewReader("select 1;")
// Run test
var out bytes.Buffer
err := lineByLineDiff(before, after, &out)
// Check error
assert.NoError(t, err)
assert.Equal(t, "select 1;\n", out.String())
})
}
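One subtlety the last case pins down: lineByLineDiff only advances its anchor on an exact match, so once a before line such as "select 0;" never shows up in after, the anchor stays put and every later after line is emitted, even one that also occurs in before. That is why the helper leans on the assumption stated in squashMigrations, that entities in the managed schemas are not altered by migrations; against pg_dump output satisfying that assumption it recovers exactly the added lines, but it is not a general-purpose diff.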
109 changes: 109 additions & 0 deletions internal/migration/squash/testdata/after.sql
@@ -0,0 +1,109 @@

SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;

CREATE SCHEMA IF NOT EXISTS "storage";

ALTER SCHEMA "storage" OWNER TO "supabase_admin";

CREATE OR REPLACE FUNCTION "storage"."can_insert_object"("bucketid" "text", "name" "text", "owner" "uuid", "metadata" "jsonb") RETURNS "void"
LANGUAGE "plpgsql"
AS $$
BEGIN
INSERT INTO "storage"."objects" ("bucket_id", "name", "owner", "metadata") VALUES (bucketid, name, owner, metadata);
-- hack to rollback the successful insert
RAISE sqlstate 'PT200' using
message = 'ROLLBACK',
detail = 'rollback successful insert';
END
$$;

ALTER FUNCTION "storage"."can_insert_object"("bucketid" "text", "name" "text", "owner" "uuid", "metadata" "jsonb") OWNER TO "supabase_storage_admin";

CREATE TABLE IF NOT EXISTS "storage"."objects" (
"id" "uuid" DEFAULT "gen_random_uuid"() NOT NULL,
"bucket_id" "text",
"name" "text",
"owner" "uuid",
"created_at" timestamp with time zone DEFAULT "now"(),
"updated_at" timestamp with time zone DEFAULT "now"(),
"last_accessed_at" timestamp with time zone DEFAULT "now"(),
"metadata" "jsonb",
"path_tokens" "text"[] GENERATED ALWAYS AS ("string_to_array"("name", '/'::"text")) STORED,
"version" "text",
"owner_id" "text"
);

ALTER TABLE "storage"."objects" OWNER TO "supabase_storage_admin";

COMMENT ON COLUMN "storage"."objects"."owner" IS 'Field is deprecated, use owner_id instead';

ALTER TABLE ONLY "storage"."buckets"
ADD CONSTRAINT "buckets_pkey" PRIMARY KEY ("id");

ALTER TABLE ONLY "storage"."migrations"
ADD CONSTRAINT "migrations_name_key" UNIQUE ("name");

ALTER TABLE ONLY "storage"."migrations"
ADD CONSTRAINT "migrations_pkey" PRIMARY KEY ("id");

ALTER TABLE ONLY "storage"."objects"
ADD CONSTRAINT "objects_pkey" PRIMARY KEY ("id");

CREATE UNIQUE INDEX "bname" ON "storage"."buckets" USING "btree" ("name");

CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING "btree" ("bucket_id", "name");

CREATE INDEX "name_prefix_search" ON "storage"."objects" USING "btree" ("name" "text_pattern_ops");

CREATE OR REPLACE TRIGGER "delete_images" AFTER DELETE ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "public"."check_can_upload"();

CREATE OR REPLACE TRIGGER "insert_images" AFTER INSERT ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "public"."check_can_upload"();

CREATE OR REPLACE TRIGGER "update_objects_updated_at" BEFORE UPDATE ON "storage"."objects" FOR EACH ROW EXECUTE FUNCTION "storage"."update_updated_at_column"();

ALTER TABLE ONLY "storage"."objects"
ADD CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id");

ALTER TABLE ONLY "storage"."objects"
ADD CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id");

CREATE POLICY "Anyone can read owner" ON "storage"."objects" FOR SELECT USING ((("bucket_id" = 'public-images'::"text") AND ("owner" IS NULL)));

CREATE POLICY "Authenticated users can delete images" ON "storage"."objects" FOR DELETE TO "authenticated" USING ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner")));

CREATE POLICY "Authenticated users can insert images" ON "storage"."objects" FOR INSERT TO "authenticated" WITH CHECK ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner")));

CREATE POLICY "Authenticated users can read images" ON "storage"."objects" FOR SELECT TO "authenticated" USING ((("bucket_id" = 'public-images'::"text") AND ("auth"."uid"() = "owner")));

CREATE POLICY "Authenticated users can update images" ON "storage"."objects" FOR UPDATE TO "authenticated" USING (("bucket_id" = 'public-images'::"text")) WITH CHECK (("auth"."uid"() = "owner"));

ALTER TABLE "storage"."buckets" ENABLE ROW LEVEL SECURITY;

ALTER TABLE "storage"."migrations" ENABLE ROW LEVEL SECURITY;

ALTER TABLE "storage"."objects" ENABLE ROW LEVEL SECURITY;

CREATE POLICY "objects_auth_select" ON "storage"."objects" FOR SELECT TO "authenticated" USING (("owner" = "auth"."uid"()));

GRANT ALL ON SCHEMA "storage" TO "postgres";
GRANT USAGE ON SCHEMA "storage" TO "anon";
GRANT USAGE ON SCHEMA "storage" TO "authenticated";
GRANT USAGE ON SCHEMA "storage" TO "service_role";
GRANT ALL ON SCHEMA "storage" TO "supabase_storage_admin";
GRANT ALL ON SCHEMA "storage" TO "dashboard_user";

ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "postgres";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "anon";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "authenticated";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "storage" GRANT ALL ON SEQUENCES TO "service_role";

RESET ALL;
(The 2 remaining files, internal/migration/squash/testdata/before.sql and internal/migration/squash/testdata/diff.sql, were not rendered.)
