From ee621c5990cf6c1791b384891f13810a616a34cc Mon Sep 17 00:00:00 2001
From: Chris Gwilliams <517923+encima@users.noreply.github.com>
Date: Tue, 21 May 2024 15:39:58 +0300
Subject: [PATCH] feat: add `inspect report` subcommand and embed queries as SQL scripts (#2246)

* initial work to split out queries into standalone scripts

* add report command to export all command output to CSV files and embed SQL scripts

* add progress printing to report command

* add output flag to specify directory

* Update internal/inspect/utils.go

Co-authored-by: Han Qiao

* use pgx for csv output and rename flag for output path

* chore(deps): bump tar from 7.0.1 to 7.1.0 (#2240)

Bumps [tar](https://github.com/isaacs/node-tar) from 7.0.1 to 7.1.0.
- [Release notes](https://github.com/isaacs/node-tar/releases)
- [Changelog](https://github.com/isaacs/node-tar/blob/main/CHANGELOG.md)
- [Commits](https://github.com/isaacs/node-tar/compare/v7.0.1...v7.1.0)

---
updated-dependencies:
- dependency-name: tar
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* chore(deps): bump github.com/golangci/golangci-lint from 1.57.2 to 1.58.0 (#2241)

chore(deps): bump github.com/golangci/golangci-lint

Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.57.2 to 1.58.0.
- [Release notes](https://github.com/golangci/golangci-lint/releases)
- [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md)
- [Commits](https://github.com/golangci/golangci-lint/compare/v1.57.2...v1.58.0)

---
updated-dependencies:
- dependency-name: github.com/golangci/golangci-lint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* chore(deps): bump golang.org/x/term from 0.19.0 to 0.20.0 (#2243)

Bumps [golang.org/x/term](https://github.com/golang/term) from 0.19.0 to 0.20.0.
- [Commits](https://github.com/golang/term/compare/v0.19.0...v0.20.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* chore(deps): bump golang.org/x/oauth2 from 0.19.0 to 0.20.0 (#2242)

Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.19.0 to 0.20.0.
- [Commits](https://github.com/golang/oauth2/compare/v0.19.0...v0.20.0)

---
updated-dependencies:
- dependency-name: golang.org/x/oauth2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* fix: bump edge-runtime to 1.47.0

* fix: bump studio version (#2245)

* chore(deps): bump github.com/gin-gonic/gin from 1.9.1 to 1.10.0 (#2249)

Bumps [github.com/gin-gonic/gin](https://github.com/gin-gonic/gin) from 1.9.1 to 1.10.0.
- [Release notes](https://github.com/gin-gonic/gin/releases)
- [Changelog](https://github.com/gin-gonic/gin/blob/master/CHANGELOG.md)
- [Commits](https://github.com/gin-gonic/gin/compare/v1.9.1...v1.10.0)

---
updated-dependencies:
- dependency-name: github.com/gin-gonic/gin
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix: bump edge-runtime to 1.48.0 * chore(deps): bump golangci/golangci-lint-action from 5 to 6 (#2248) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5 to 6. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v5...v6) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: clean up db pull implementation (#2175) * chore: clean up db pull implementation * chore: update unit tests * feat: add hook secrets * Apply suggestions from code review * fix: add send sms hook as test * chore: typo in variable name * fix: index out of bound when applying migrations * chore(deps): bump github.com/golangci/golangci-lint from 1.58.0 to 1.58.1 (#2256) chore(deps): bump github.com/golangci/golangci-lint Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.58.0 to 1.58.1. - [Release notes](https://github.com/golangci/golangci-lint/releases) - [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md) - [Commits](https://github.com/golangci/golangci-lint/compare/v1.58.0...v1.58.1) --- updated-dependencies: - dependency-name: github.com/golangci/golangci-lint dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore(deps): bump github.com/charmbracelet/bubbletea from 0.26.1 to 0.26.2 (#2257) chore(deps): bump github.com/charmbracelet/bubbletea Bumps [github.com/charmbracelet/bubbletea](https://github.com/charmbracelet/bubbletea) from 0.26.1 to 0.26.2. - [Release notes](https://github.com/charmbracelet/bubbletea/releases) - [Commits](https://github.com/charmbracelet/bubbletea/compare/v0.26.1...v0.26.2) --- updated-dependencies: - dependency-name: github.com/charmbracelet/bubbletea dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: support struct return type for pgmock * fix: add unit test for cache hit * fix: create migrations dir before saving diff (#2260) * chore(deps): bump github.com/docker/cli from 26.1.1+incompatible to 26.1.2+incompatible (#2261) chore(deps): bump github.com/docker/cli Bumps [github.com/docker/cli](https://github.com/docker/cli) from 26.1.1+incompatible to 26.1.2+incompatible. - [Commits](https://github.com/docker/cli/compare/v26.1.1...v26.1.2) --- updated-dependencies: - dependency-name: github.com/docker/cli dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore(deps): bump github.com/containers/common from 0.58.2 to 0.58.3 (#2262) Bumps [github.com/containers/common](https://github.com/containers/common) from 0.58.2 to 0.58.3. 
- [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.58.2...v0.58.3) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore(deps): bump github.com/docker/docker from 26.1.1+incompatible to 26.1.2+incompatible (#2263) chore(deps): bump github.com/docker/docker Bumps [github.com/docker/docker](https://github.com/docker/docker) from 26.1.1+incompatible to 26.1.2+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v26.1.1...v26.1.2) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix: bump edge-runtime to 1.49.0 * initial work to split out queries into standalone scripts * add test cases for inspect commands * chore: use pg schemas in unit tests * chore: test report command * chore: verify connection is closed * chore: embed queries within each command package * chore: move query reading to file walker --------- Signed-off-by: dependabot[bot] Co-authored-by: Han Qiao Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Lakshan Perera Co-authored-by: Terry Sutton Co-authored-by: joel Co-authored-by: Qiao Han Co-authored-by: Nyannyacha --- cmd/inspect.go | 29 +- internal/inspect/bloat/bloat.go | 8 +- internal/inspect/bloat/bloat.sql | 61 ++++ internal/inspect/bloat/bloat_test.go | 43 +++ internal/inspect/blocking/blocking.go | 9 +- internal/inspect/blocking/blocking.sql | 15 + internal/inspect/blocking/blocking_test.go | 42 +++ internal/inspect/cache/cache.go | 8 +- internal/inspect/cache/cache.sql | 9 + internal/inspect/cache/cache_test.go | 5 +- internal/inspect/calls/calls.go | 8 +- internal/inspect/calls/calls.sql | 9 + internal/inspect/calls/calls_test.go | 41 +++ internal/inspect/index_sizes/index_sizes.go | 8 +- internal/inspect/index_sizes/index_sizes.sql | 9 + .../inspect/index_sizes/index_sizes_test.go | 40 +++ internal/inspect/index_usage/index_usage.go | 8 +- internal/inspect/index_usage/index_usage.sql | 17 + .../inspect/index_usage/index_usage_test.go | 41 +++ internal/inspect/locks/locks.go | 8 +- internal/inspect/locks/locks.sql | 12 + internal/inspect/locks/locks_test.go | 42 +++ .../long_running_queries.go | 8 +- .../long_running_queries.sql | 12 + .../long_running_queries_test.go | 39 +++ internal/inspect/outliers/outliers.go | 8 +- internal/inspect/outliers/outliers.sql | 9 + internal/inspect/outliers/outliers_test.go | 41 +++ internal/inspect/queries.go | 305 ------------------ .../replication_slots/replication_slots.go | 8 +- .../replication_slots/replication_slots.sql | 11 + .../replication_slots_test.go | 41 +++ internal/inspect/report.go | 79 +++++ internal/inspect/report_test.go | 110 +++++++ .../role_connections/role_connections.go | 8 +- .../role_connections/role_connections.sql | 16 + .../role_connections/role_connections_test.go | 39 +++ internal/inspect/seq_scans/seq_scans.go | 8 +- internal/inspect/seq_scans/seq_scans.sql | 6 + internal/inspect/seq_scans/seq_scans_test.go | 40 +++ .../table_index_sizes/table_index_sizes.go | 8 +- 
.../table_index_sizes/table_index_sizes.sql | 8 + .../table_index_sizes_test.go | 40 +++ .../table_record_counts.go | 8 +- .../table_record_counts.sql | 7 + .../table_record_counts_test.go | 41 +++ internal/inspect/table_sizes/table_sizes.go | 8 +- internal/inspect/table_sizes/table_sizes.sql | 9 + .../inspect/table_sizes/table_sizes_test.go | 41 +++ .../total_index_size/total_index_size.go | 8 +- .../total_index_size/total_index_size.sql | 6 + .../total_index_size/total_index_size_test.go | 39 +++ .../total_table_sizes/total_table_sizes.go | 8 +- .../total_table_sizes/total_table_sizes.sql | 9 + .../total_table_sizes_test.go | 41 +++ .../inspect/unused_indexes/unused_indexes.go | 8 +- .../inspect/unused_indexes/unused_indexes.sql | 13 + .../unused_indexes/unused_indexes_test.go | 42 +++ internal/inspect/vacuum_stats/vacuum_stats.go | 8 +- .../inspect/vacuum_stats/vacuum_stats.sql | 45 +++ .../inspect/vacuum_stats/vacuum_stats_test.go | 46 +++ 61 files changed, 1354 insertions(+), 349 deletions(-) create mode 100644 internal/inspect/bloat/bloat.sql create mode 100644 internal/inspect/bloat/bloat_test.go create mode 100644 internal/inspect/blocking/blocking.sql create mode 100644 internal/inspect/blocking/blocking_test.go create mode 100644 internal/inspect/cache/cache.sql create mode 100644 internal/inspect/calls/calls.sql create mode 100644 internal/inspect/calls/calls_test.go create mode 100644 internal/inspect/index_sizes/index_sizes.sql create mode 100644 internal/inspect/index_sizes/index_sizes_test.go create mode 100644 internal/inspect/index_usage/index_usage.sql create mode 100644 internal/inspect/index_usage/index_usage_test.go create mode 100644 internal/inspect/locks/locks.sql create mode 100644 internal/inspect/locks/locks_test.go create mode 100644 internal/inspect/long_running_queries/long_running_queries.sql create mode 100644 internal/inspect/long_running_queries/long_running_queries_test.go create mode 100644 internal/inspect/outliers/outliers.sql create mode 100644 internal/inspect/outliers/outliers_test.go delete mode 100644 internal/inspect/queries.go create mode 100644 internal/inspect/replication_slots/replication_slots.sql create mode 100644 internal/inspect/replication_slots/replication_slots_test.go create mode 100644 internal/inspect/report.go create mode 100644 internal/inspect/report_test.go create mode 100644 internal/inspect/role_connections/role_connections.sql create mode 100644 internal/inspect/role_connections/role_connections_test.go create mode 100644 internal/inspect/seq_scans/seq_scans.sql create mode 100644 internal/inspect/seq_scans/seq_scans_test.go create mode 100644 internal/inspect/table_index_sizes/table_index_sizes.sql create mode 100644 internal/inspect/table_index_sizes/table_index_sizes_test.go create mode 100644 internal/inspect/table_record_counts/table_record_counts.sql create mode 100644 internal/inspect/table_record_counts/table_record_counts_test.go create mode 100644 internal/inspect/table_sizes/table_sizes.sql create mode 100644 internal/inspect/table_sizes/table_sizes_test.go create mode 100644 internal/inspect/total_index_size/total_index_size.sql create mode 100644 internal/inspect/total_index_size/total_index_size_test.go create mode 100644 internal/inspect/total_table_sizes/total_table_sizes.sql create mode 100644 internal/inspect/total_table_sizes/total_table_sizes_test.go create mode 100644 internal/inspect/unused_indexes/unused_indexes.sql create mode 100644 internal/inspect/unused_indexes/unused_indexes_test.go create mode 
100644 internal/inspect/vacuum_stats/vacuum_stats.sql create mode 100644 internal/inspect/vacuum_stats/vacuum_stats_test.go diff --git a/cmd/inspect.go b/cmd/inspect.go index bbc42ecfa..526669428 100644 --- a/cmd/inspect.go +++ b/cmd/inspect.go @@ -1,16 +1,20 @@ package cmd import ( + "fmt" "os" "os/signal" + "path/filepath" "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/supabase/cli/internal/inspect/bloat" "github.com/supabase/cli/internal/inspect/blocking" "github.com/supabase/cli/internal/inspect/cache" + "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/flags" + "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/inspect/calls" "github.com/supabase/cli/internal/inspect/index_sizes" "github.com/supabase/cli/internal/inspect/index_usage" @@ -197,15 +201,31 @@ var ( return role_connections.Run(cmd.Context(), flags.DbConfig, afero.NewOsFs()) }, } + + outputDir string + + reportCmd = &cobra.Command{ + Use: "report", + Short: "Generate a CSV output for all inspect commands", + RunE: func(cmd *cobra.Command, args []string) error { + if len(outputDir) == 0 { + defaultPath := filepath.Join(utils.CurrentDirAbs, "report") + title := fmt.Sprintf("Enter a directory to save output files (or leave blank to use %s): ", utils.Bold(defaultPath)) + if outputDir = utils.NewConsole().PromptText(title); len(outputDir) == 0 { + outputDir = defaultPath + } + } + return inspect.Report(cmd.Context(), outputDir, flags.DbConfig, afero.NewOsFs()) + }, + } ) func init() { - inspectFlags := inspectDBCmd.PersistentFlags() + inspectFlags := inspectCmd.PersistentFlags() inspectFlags.String("db-url", "", "Inspect the database specified by the connection string (must be percent-encoded).") inspectFlags.Bool("linked", true, "Inspect the linked project.") inspectFlags.Bool("local", false, "Inspect the local database.") - inspectDBCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") - inspectCmd.AddCommand(inspectDBCmd) + inspectCmd.MarkFlagsMutuallyExclusive("db-url", "linked", "local") inspectDBCmd.AddCommand(inspectCacheHitCmd) inspectDBCmd.AddCommand(inspectReplicationSlotsCmd) inspectDBCmd.AddCommand(inspectIndexUsageCmd) @@ -225,5 +245,8 @@ func init() { inspectDBCmd.AddCommand(inspectBloatCmd) inspectDBCmd.AddCommand(inspectVacuumStatsCmd) inspectDBCmd.AddCommand(inspectRoleConnectionsCmd) + inspectCmd.AddCommand(inspectDBCmd) + reportCmd.Flags().StringVar(&outputDir, "output-dir", "", "Path to save CSV files in") + inspectCmd.AddCommand(reportCmd) rootCmd.AddCommand(inspectCmd) } diff --git a/internal/inspect/bloat/bloat.go b/internal/inspect/bloat/bloat.go index 6bb2c57c9..13d31dd06 100644 --- a/internal/inspect/bloat/bloat.go +++ b/internal/inspect/bloat/bloat.go @@ -2,6 +2,7 @@ package bloat import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed bloat.sql +var BloatQuery string + type Result struct { Type string Schemaname string @@ -28,7 +31,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.BLOAT_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := 
conn.Query(ctx, BloatQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/bloat/bloat.sql b/internal/inspect/bloat/bloat.sql new file mode 100644 index 000000000..25917d874 --- /dev/null +++ b/internal/inspect/bloat/bloat.sql @@ -0,0 +1,61 @@ +WITH constants AS ( + SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 4 AS ma +), bloat_info AS ( + SELECT + ma,bs,schemaname,tablename, + (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, + (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 + FROM ( + SELECT + schemaname, tablename, hdr, ma, bs, + SUM((1-null_frac)*avg_width) AS datawidth, + MAX(null_frac) AS maxfracsum, + hdr+( + SELECT 1+count(*)/8 + FROM pg_stats s2 + WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename + ) AS nullhdr + FROM pg_stats s, constants + GROUP BY 1,2,3,4,5 + ) AS foo +), table_bloat AS ( + SELECT + schemaname, tablename, cc.relpages, bs, + CEIL((cc.reltuples*((datahdr+ma- + (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)) AS otta + FROM bloat_info + JOIN pg_class cc ON cc.relname = bloat_info.tablename + JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema' +), index_bloat AS ( + SELECT + schemaname, tablename, bs, + COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, + COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols + FROM bloat_info + JOIN pg_class cc ON cc.relname = bloat_info.tablename + JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema' + JOIN pg_index i ON indrelid = cc.oid + JOIN pg_class c2 ON c2.oid = i.indexrelid +) +SELECT + type, schemaname, object_name, bloat, pg_size_pretty(raw_waste) as waste +FROM +(SELECT + 'table' as type, + schemaname, + tablename as object_name, + ROUND(CASE WHEN otta=0 THEN 0.0 ELSE table_bloat.relpages/otta::numeric END,1) AS bloat, + CASE WHEN relpages < otta THEN '0' ELSE (bs*(table_bloat.relpages-otta)::bigint)::bigint END AS raw_waste +FROM + table_bloat + UNION +SELECT + 'index' as type, + schemaname, + tablename || '::' || iname as object_name, + ROUND(CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages/iotta::numeric END,1) AS bloat, + CASE WHEN ipages < iotta THEN '0' ELSE (bs*(ipages-iotta))::bigint END AS raw_waste +FROM + index_bloat) bloat_summary +WHERE NOT schemaname LIKE ANY($1) +ORDER BY raw_waste DESC, bloat DESC diff --git a/internal/inspect/bloat/bloat_test.go b/internal/inspect/bloat/bloat_test.go new file mode 100644 index 000000000..5cbbfcbfe --- /dev/null +++ b/internal/inspect/bloat/bloat_test.go @@ -0,0 +1,43 @@ +package bloat + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestBloat(t *testing.T) { + t.Run("inspects bloat", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock 
postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(BloatQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Type: "index hit rate", + Schemaname: "public", + Object_name: "table", + Bloat: "0.9", + Waste: "0.1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/blocking/blocking.go b/internal/inspect/blocking/blocking.go index 2367cf124..77870f949 100644 --- a/internal/inspect/blocking/blocking.go +++ b/internal/inspect/blocking/blocking.go @@ -2,6 +2,7 @@ package blocking import ( "context" + _ "embed" "fmt" "regexp" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed blocking.sql +var BlockingQuery string + type Result struct { Blocked_pid int Blocking_statement string @@ -29,7 +32,9 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.BLOCKING_QUERY) + defer conn.Close(context.Background()) + // Ref: https://github.com/heroku/heroku-pg-extras/blob/main/commands/blocking.js#L7 + rows, err := conn.Query(ctx, BlockingQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/blocking/blocking.sql b/internal/inspect/blocking/blocking.sql new file mode 100644 index 000000000..ea7f7671c --- /dev/null +++ b/internal/inspect/blocking/blocking.sql @@ -0,0 +1,15 @@ +SELECT + bl.pid AS blocked_pid, + ka.query AS blocking_statement, + age(now(), ka.query_start)::text AS blocking_duration, + kl.pid AS blocking_pid, + a.query AS blocked_statement, + age(now(), a.query_start)::text AS blocked_duration +FROM pg_catalog.pg_locks bl +JOIN pg_catalog.pg_stat_activity a + ON bl.pid = a.pid +JOIN pg_catalog.pg_locks kl +JOIN pg_catalog.pg_stat_activity ka + ON kl.pid = ka.pid + ON bl.transactionid = kl.transactionid AND bl.pid != kl.pid +WHERE NOT bl.granted diff --git a/internal/inspect/blocking/blocking_test.go b/internal/inspect/blocking/blocking_test.go new file mode 100644 index 000000000..d41f24066 --- /dev/null +++ b/internal/inspect/blocking/blocking_test.go @@ -0,0 +1,42 @@ +package blocking + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestBloatCommand(t *testing.T) { + t.Run("inspects blocking", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(BlockingQuery). 
+ Reply("SELECT 1", Result{ + Blocked_pid: 1, + Blocking_statement: "select 1", + Blocking_duration: "2s", + Blocking_pid: 1, + Blocked_statement: "select 1", + Blocked_duration: "2s", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/cache/cache.go b/internal/inspect/cache/cache.go index e689be658..4b775515f 100644 --- a/internal/inspect/cache/cache.go +++ b/internal/inspect/cache/cache.go @@ -2,30 +2,34 @@ package cache import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed cache.sql +var CacheQuery string + type Result struct { Name string Ratio float64 } func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + // Ref: https://github.com/heroku/heroku-pg-extras/blob/main/commands/cache_hit.js#L7 conn, err := utils.ConnectByConfig(ctx, config, options...) if err != nil { return err } defer conn.Close(context.Background()) - rows, err := conn.Query(ctx, inspect.CACHE_QUERY) + rows, err := conn.Query(ctx, CacheQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/cache/cache.sql b/internal/inspect/cache/cache.sql new file mode 100644 index 000000000..fc14b288c --- /dev/null +++ b/internal/inspect/cache/cache.sql @@ -0,0 +1,9 @@ +SELECT + 'index hit rate' AS name, + (sum(idx_blks_hit)) / nullif(sum(idx_blks_hit + idx_blks_read),0) AS ratio +FROM pg_statio_user_indexes +UNION ALL +SELECT + 'table hit rate' AS name, + sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read),0) AS ratio +FROM pg_statio_user_tables diff --git a/internal/inspect/cache/cache_test.go b/internal/inspect/cache/cache_test.go index 974ba1742..36b508185 100644 --- a/internal/inspect/cache/cache_test.go +++ b/internal/inspect/cache/cache_test.go @@ -7,7 +7,6 @@ import ( "github.com/jackc/pgconn" "github.com/spf13/afero" "github.com/stretchr/testify/assert" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/testing/pgtest" ) @@ -26,7 +25,7 @@ func TestCacheCommand(t *testing.T) { // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) - conn.Query(inspect.CACHE_QUERY). + conn.Query(CacheQuery). Reply("SELECT 1", Result{ Name: "index hit rate", Ratio: 0.9, @@ -43,7 +42,7 @@ func TestCacheCommand(t *testing.T) { // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) - conn.Query(inspect.CACHE_QUERY). + conn.Query(CacheQuery). 
Reply("SELECT 1", []interface{}{}) // Run test err := Run(context.Background(), dbConfig, fsys, conn.Intercept) diff --git a/internal/inspect/calls/calls.go b/internal/inspect/calls/calls.go index 02808cb07..94bd8eafb 100644 --- a/internal/inspect/calls/calls.go +++ b/internal/inspect/calls/calls.go @@ -2,6 +2,7 @@ package calls import ( "context" + _ "embed" "fmt" "regexp" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed calls.sql +var CallsQuery string + type Result struct { Total_exec_time string Prop_exec_time string @@ -28,7 +31,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.CALLS_QUERY) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, CallsQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/calls/calls.sql b/internal/inspect/calls/calls.sql new file mode 100644 index 000000000..57c3ecee2 --- /dev/null +++ b/internal/inspect/calls/calls.sql @@ -0,0 +1,9 @@ +SELECT + query, + (interval '1 millisecond' * total_exec_time)::text AS total_exec_time, + to_char((total_exec_time/sum(total_exec_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time, + to_char(calls, 'FM999G999G990') AS ncalls, + (interval '1 millisecond' * (blk_read_time + blk_write_time))::text AS sync_io_time +FROM pg_stat_statements +ORDER BY calls DESC +LIMIT 10 diff --git a/internal/inspect/calls/calls_test.go b/internal/inspect/calls/calls_test.go new file mode 100644 index 000000000..5620ec6a8 --- /dev/null +++ b/internal/inspect/calls/calls_test.go @@ -0,0 +1,41 @@ +package calls + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestCallsCommand(t *testing.T) { + t.Run("inspects calls", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(CallsQuery). 
+ Reply("SELECT 1", Result{ + Total_exec_time: "0.9", + Prop_exec_time: "0.9", + Ncalls: "0.9", + Sync_io_time: "0.9", + Query: "SELECT 1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/index_sizes/index_sizes.go b/internal/inspect/index_sizes/index_sizes.go index 37e6f273f..c2fad6c6d 100644 --- a/internal/inspect/index_sizes/index_sizes.go +++ b/internal/inspect/index_sizes/index_sizes.go @@ -2,6 +2,7 @@ package index_sizes import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed index_sizes.sql +var IndexSizesQuery string + type Result struct { Name string Size string @@ -25,7 +28,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.INDEX_SIZES_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, IndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/index_sizes/index_sizes.sql b/internal/inspect/index_sizes/index_sizes.sql new file mode 100644 index 000000000..25b6f96d5 --- /dev/null +++ b/internal/inspect/index_sizes/index_sizes.sql @@ -0,0 +1,9 @@ +SELECT + n.nspname || '.' || c.relname AS name, + pg_size_pretty(sum(c.relpages::bigint*8192)::bigint) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'i' +GROUP BY n.nspname, c.relname +ORDER BY sum(c.relpages) DESC diff --git a/internal/inspect/index_sizes/index_sizes_test.go b/internal/inspect/index_sizes/index_sizes_test.go new file mode 100644 index 000000000..72742e74b --- /dev/null +++ b/internal/inspect/index_sizes/index_sizes_test.go @@ -0,0 +1,40 @@ +package index_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestIndexSizes(t *testing.T) { + t.Run("inspects index sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(IndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). 
+ Reply("SELECT 1", Result{ + Name: "test_table_idx", + Size: "100GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/index_usage/index_usage.go b/internal/inspect/index_usage/index_usage.go index c34d7deb9..b2165f346 100644 --- a/internal/inspect/index_usage/index_usage.go +++ b/internal/inspect/index_usage/index_usage.go @@ -2,6 +2,7 @@ package index_usage import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed index_usage.sql +var IndexUsageQuery string + type Result struct { Name string Percent_of_times_index_used string @@ -26,7 +29,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.INDEX_USAGE_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, IndexUsageQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/index_usage/index_usage.sql b/internal/inspect/index_usage/index_usage.sql new file mode 100644 index 000000000..fa83e97db --- /dev/null +++ b/internal/inspect/index_usage/index_usage.sql @@ -0,0 +1,17 @@ +SELECT + schemaname || '.' || relname AS name, + CASE + WHEN idx_scan IS NULL THEN 'Insufficient data' + WHEN idx_scan = 0 THEN 'Insufficient data' + ELSE ROUND(100.0 * idx_scan / (seq_scan + idx_scan), 1) || '%' + END percent_of_times_index_used, + n_live_tup rows_in_table +FROM pg_stat_user_tables +WHERE NOT schemaname LIKE ANY($1) +ORDER BY + CASE + WHEN idx_scan is null then 1 + WHEN idx_scan = 0 then 1 + ELSE 0 + END, + n_live_tup DESC diff --git a/internal/inspect/index_usage/index_usage_test.go b/internal/inspect/index_usage/index_usage_test.go new file mode 100644 index 000000000..24d61ebf9 --- /dev/null +++ b/internal/inspect/index_usage/index_usage_test.go @@ -0,0 +1,41 @@ +package index_usage + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestIndexUsage(t *testing.T) { + t.Run("inspects index usage", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(IndexUsageQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). 
+ Reply("SELECT 1", Result{ + Name: "test_table_idx", + Percent_of_times_index_used: "0.9", + Rows_in_table: 300, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/locks/locks.go b/internal/inspect/locks/locks.go index efeb82ae9..f7d18f0ce 100644 --- a/internal/inspect/locks/locks.go +++ b/internal/inspect/locks/locks.go @@ -2,6 +2,7 @@ package locks import ( "context" + _ "embed" "fmt" "regexp" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed locks.sql +var LocksQuery string + type Result struct { Pid int Relname string @@ -29,7 +32,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.LOCKS_QUERY) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, LocksQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/locks/locks.sql b/internal/inspect/locks/locks.sql new file mode 100644 index 000000000..e140f4786 --- /dev/null +++ b/internal/inspect/locks/locks.sql @@ -0,0 +1,12 @@ +SELECT + pg_stat_activity.pid, + COALESCE(pg_class.relname, 'null') AS relname, + COALESCE(pg_locks.transactionid, 'null') AS transactionid, + pg_locks.granted, + pg_stat_activity.query, + age(now(), pg_stat_activity.query_start)::text AS age +FROM pg_stat_activity, pg_locks LEFT OUTER JOIN pg_class ON (pg_locks.relation = pg_class.oid) +WHERE pg_stat_activity.query <> '' +AND pg_locks.pid = pg_stat_activity.pid +AND pg_locks.mode = 'ExclusiveLock' +ORDER BY query_start diff --git a/internal/inspect/locks/locks_test.go b/internal/inspect/locks/locks_test.go new file mode 100644 index 000000000..7c342b2df --- /dev/null +++ b/internal/inspect/locks/locks_test.go @@ -0,0 +1,42 @@ +package locks + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestLocksCommand(t *testing.T) { + t.Run("inspects locks", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(LocksQuery). 
+ Reply("SELECT 1", Result{ + Pid: 1, + Relname: "rel", + Transactionid: "9301", + Granted: true, + Query: "select 1", + Age: "300ms", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/long_running_queries/long_running_queries.go b/internal/inspect/long_running_queries/long_running_queries.go index e091515df..4e7ce6f8d 100644 --- a/internal/inspect/long_running_queries/long_running_queries.go +++ b/internal/inspect/long_running_queries/long_running_queries.go @@ -2,18 +2,21 @@ package long_running_queries import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed long_running_queries.sql +var LongRunningQueriesQuery string + type Result struct { Pid int Duration string @@ -25,7 +28,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.LONG_RUNNING_QUERY) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, LongRunningQueriesQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/long_running_queries/long_running_queries.sql b/internal/inspect/long_running_queries/long_running_queries.sql new file mode 100644 index 000000000..68c20ba99 --- /dev/null +++ b/internal/inspect/long_running_queries/long_running_queries.sql @@ -0,0 +1,12 @@ +SELECT + pid, + age(now(), pg_stat_activity.query_start)::text AS duration, + query AS query +FROM + pg_stat_activity +WHERE + pg_stat_activity.query <> ''::text + AND state <> 'idle' + AND age(now(), pg_stat_activity.query_start) > interval '5 minutes' +ORDER BY + age(now(), pg_stat_activity.query_start) DESC diff --git a/internal/inspect/long_running_queries/long_running_queries_test.go b/internal/inspect/long_running_queries/long_running_queries_test.go new file mode 100644 index 000000000..edb97d6d0 --- /dev/null +++ b/internal/inspect/long_running_queries/long_running_queries_test.go @@ -0,0 +1,39 @@ +package long_running_queries + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestLongQueriesCommand(t *testing.T) { + t.Run("inspects long running queries", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(LongRunningQueriesQuery). 
+ Reply("SELECT 1", Result{ + Pid: 1, + Duration: "300ms", + Query: "select 1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/outliers/outliers.go b/internal/inspect/outliers/outliers.go index d19a691e5..a487c9fb1 100644 --- a/internal/inspect/outliers/outliers.go +++ b/internal/inspect/outliers/outliers.go @@ -2,6 +2,7 @@ package outliers import ( "context" + _ "embed" "fmt" "regexp" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed outliers.sql +var OutliersQuery string + type Result struct { Total_exec_time string Prop_exec_time string @@ -28,7 +31,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.OUTLIERS_QUERY) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, OutliersQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/outliers/outliers.sql b/internal/inspect/outliers/outliers.sql new file mode 100644 index 000000000..d222cc8de --- /dev/null +++ b/internal/inspect/outliers/outliers.sql @@ -0,0 +1,9 @@ +SELECT + (interval '1 millisecond' * total_exec_time)::text AS total_exec_time, + to_char((total_exec_time/sum(total_exec_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time, + to_char(calls, 'FM999G999G999G990') AS ncalls, + (interval '1 millisecond' * (blk_read_time + blk_write_time))::text AS sync_io_time, + query +FROM pg_stat_statements WHERE userid = (SELECT usesysid FROM pg_user WHERE usename = current_user LIMIT 1) +ORDER BY total_exec_time DESC +LIMIT 10 diff --git a/internal/inspect/outliers/outliers_test.go b/internal/inspect/outliers/outliers_test.go new file mode 100644 index 000000000..b34a87cf8 --- /dev/null +++ b/internal/inspect/outliers/outliers_test.go @@ -0,0 +1,41 @@ +package outliers + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestOutliersCommand(t *testing.T) { + t.Run("inspects outliers", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(OutliersQuery). 
+ Reply("SELECT 1", Result{ + Total_exec_time: "0.9", + Prop_exec_time: "0.9", + Ncalls: "0.9", + Sync_io_time: "0.9", + Query: "SELECT 1", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/queries.go b/internal/inspect/queries.go deleted file mode 100644 index 42139729d..000000000 --- a/internal/inspect/queries.go +++ /dev/null @@ -1,305 +0,0 @@ -package inspect - -const BLOAT_QUERY = `WITH constants AS ( - SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 4 AS ma -), bloat_info AS ( - SELECT - ma,bs,schemaname,tablename, - (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, - (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 - FROM ( - SELECT - schemaname, tablename, hdr, ma, bs, - SUM((1-null_frac)*avg_width) AS datawidth, - MAX(null_frac) AS maxfracsum, - hdr+( - SELECT 1+count(*)/8 - FROM pg_stats s2 - WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename - ) AS nullhdr - FROM pg_stats s, constants - GROUP BY 1,2,3,4,5 - ) AS foo -), table_bloat AS ( - SELECT - schemaname, tablename, cc.relpages, bs, - CEIL((cc.reltuples*((datahdr+ma- - (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)) AS otta - FROM bloat_info - JOIN pg_class cc ON cc.relname = bloat_info.tablename - JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema' -), index_bloat AS ( - SELECT - schemaname, tablename, bs, - COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, - COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols - FROM bloat_info - JOIN pg_class cc ON cc.relname = bloat_info.tablename - JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema' - JOIN pg_index i ON indrelid = cc.oid - JOIN pg_class c2 ON c2.oid = i.indexrelid -) -SELECT - type, schemaname, object_name, bloat, pg_size_pretty(raw_waste) as waste -FROM -(SELECT - 'table' as type, - schemaname, - tablename as object_name, - ROUND(CASE WHEN otta=0 THEN 0.0 ELSE table_bloat.relpages/otta::numeric END,1) AS bloat, - CASE WHEN relpages < otta THEN '0' ELSE (bs*(table_bloat.relpages-otta)::bigint)::bigint END AS raw_waste -FROM - table_bloat - UNION -SELECT - 'index' as type, - schemaname, - tablename || '::' || iname as object_name, - ROUND(CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages/iotta::numeric END,1) AS bloat, - CASE WHEN ipages < iotta THEN '0' ELSE (bs*(ipages-iotta))::bigint END AS raw_waste -FROM - index_bloat) bloat_summary -WHERE NOT schemaname LIKE ANY($1) -ORDER BY raw_waste DESC, bloat DESC` - -// Ref: https://github.com/heroku/heroku-pg-extras/blob/main/commands/blocking.js#L7 -const BLOCKING_QUERY = `SELECT - bl.pid AS blocked_pid, - ka.query AS blocking_statement, - age(now(), ka.query_start)::text AS blocking_duration, - kl.pid AS blocking_pid, - a.query AS blocked_statement, - age(now(), a.query_start)::text AS blocked_duration -FROM pg_catalog.pg_locks bl -JOIN pg_catalog.pg_stat_activity a - ON bl.pid = a.pid -JOIN pg_catalog.pg_locks kl -JOIN pg_catalog.pg_stat_activity ka - ON kl.pid = ka.pid - ON bl.transactionid = kl.transactionid AND bl.pid != kl.pid -WHERE NOT bl.granted` - -// Ref: 
https://github.com/heroku/heroku-pg-extras/blob/main/commands/cache_hit.js#L7 -const CACHE_QUERY = `SELECT - 'index hit rate' AS name, - (sum(idx_blks_hit)) / nullif(sum(idx_blks_hit + idx_blks_read),0) AS ratio -FROM pg_statio_user_indexes -UNION ALL -SELECT - 'table hit rate' AS name, - sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read),0) AS ratio -FROM pg_statio_user_tables;` - -const CALLS_QUERY = `SELECT - query, - (interval '1 millisecond' * total_exec_time)::text AS total_exec_time, - to_char((total_exec_time/sum(total_exec_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time, - to_char(calls, 'FM999G999G990') AS ncalls, - (interval '1 millisecond' * (blk_read_time + blk_write_time))::text AS sync_io_time -FROM pg_stat_statements -ORDER BY calls DESC -LIMIT 10` - -const INDEX_SIZES_QUERY = `SELECT - n.nspname || '.' || c.relname AS name, - pg_size_pretty(sum(c.relpages::bigint*8192)::bigint) AS size -FROM pg_class c -LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) -WHERE NOT n.nspname LIKE ANY($1) -AND c.relkind = 'i' -GROUP BY n.nspname, c.relname -ORDER BY sum(c.relpages) DESC` - -const INDEX_USAGE_QUERY = `SELECT - schemaname || '.' || relname AS name, - CASE - WHEN idx_scan IS NULL THEN 'Insufficient data' - WHEN idx_scan = 0 THEN 'Insufficient data' - ELSE ROUND(100.0 * idx_scan / (seq_scan + idx_scan), 1) || '%' - END percent_of_times_index_used, - n_live_tup rows_in_table -FROM pg_stat_user_tables -WHERE NOT schemaname LIKE ANY($1) -ORDER BY - CASE - WHEN idx_scan is null then 1 - WHEN idx_scan = 0 then 1 - ELSE 0 - END, - n_live_tup DESC` - -const LOCKS_QUERY = `SELECT - pg_stat_activity.pid, - COALESCE(pg_class.relname, 'null') AS relname, - COALESCE(pg_locks.transactionid, 'null') AS transactionid, - pg_locks.granted, - pg_stat_activity.query, - age(now(), pg_stat_activity.query_start)::text AS age -FROM pg_stat_activity, pg_locks LEFT OUTER JOIN pg_class ON (pg_locks.relation = pg_class.oid) -WHERE pg_stat_activity.query <> '' -AND pg_locks.pid = pg_stat_activity.pid -AND pg_locks.mode = 'ExclusiveLock' -ORDER BY query_start` - -const LONG_RUNNING_QUERY = `SELECT - pid, - age(now(), pg_stat_activity.query_start)::text AS duration, - query AS query -FROM - pg_stat_activity -WHERE - pg_stat_activity.query <> ''::text - AND state <> 'idle' - AND age(now(), pg_stat_activity.query_start) > interval '5 minutes' -ORDER BY - age(now(), pg_stat_activity.query_start) DESC` - -const OUTLIERS_QUERY = `SELECT - (interval '1 millisecond' * total_exec_time)::text AS total_exec_time, - to_char((total_exec_time/sum(total_exec_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time, - to_char(calls, 'FM999G999G999G990') AS ncalls, - (interval '1 millisecond' * (blk_read_time + blk_write_time))::text AS sync_io_time, - query -FROM pg_stat_statements WHERE userid = (SELECT usesysid FROM pg_user WHERE usename = current_user LIMIT 1) -ORDER BY total_exec_time DESC -LIMIT 10` - -const REPLICATION_SLOTS_QUERY = `SELECT - s.slot_name, - s.active, - COALESCE(r.state, 'N/A') as state, - CASE WHEN r.client_addr IS NULL - THEN 'N/A' - ELSE r.client_addr::text - END replication_client_address, - GREATEST(0, ROUND((redo_lsn-restart_lsn)/1024/1024/1024, 2)) as replication_lag_gb -FROM pg_control_checkpoint(), pg_replication_slots s -LEFT JOIN pg_stat_replication r ON (r.pid = s.active_pid)` - -const ROLE_CONNECTIONS_QUERY = `SELECT - rolname, - ( - SELECT - count(*) - FROM - pg_stat_activity - WHERE - pg_roles.rolname = pg_stat_activity.usename - ) AS active_connections, - 
CASE WHEN rolconnlimit = -1 - THEN current_setting('max_connections')::int8 - ELSE rolconnlimit - END AS connection_limit -FROM pg_roles -ORDER BY 2 DESC` - -const SEQ_SCANS_QUERY = `SELECT - schemaname || '.' || relname AS name, - seq_scan as count -FROM pg_stat_user_tables -WHERE NOT schemaname LIKE ANY($1) -ORDER BY seq_scan DESC` - -const TABLE_INDEX_SIZES_QUERY = `SELECT - n.nspname || '.' || c.relname AS table, - pg_size_pretty(pg_indexes_size(c.oid)) AS index_size -FROM pg_class c -LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) -WHERE NOT n.nspname LIKE ANY($1) -AND c.relkind = 'r' -ORDER BY pg_indexes_size(c.oid) DESC` - -const TABLE_RECORD_COUNTS_QUERY = `SELECT - schemaname AS schema, - relname AS name, - n_live_tup AS estimated_count -FROM pg_stat_user_tables -WHERE NOT schemaname LIKE ANY($1) -ORDER BY n_live_tup DESC` - -const TABLE_SIZES_QUERY = `SELECT - n.nspname AS schema, - c.relname AS name, - pg_size_pretty(pg_table_size(c.oid)) AS size -FROM pg_class c -LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) -WHERE NOT n.nspname LIKE ANY($1) -AND c.relkind = 'r' -ORDER BY pg_table_size(c.oid) DESC` - -const TOTAL_INDEX_SIZE_QUERY = `SELECT - pg_size_pretty(sum(c.relpages::bigint*8192)::bigint) AS size -FROM pg_class c -LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) -WHERE NOT n.nspname LIKE ANY($1) -AND c.relkind = 'i'` - -const TOTAL_TABLE_SIZES_QUERY = `SELECT - n.nspname AS schema, - c.relname AS name, - pg_size_pretty(pg_total_relation_size(c.oid)) AS size -FROM pg_class c -LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) -WHERE NOT n.nspname LIKE ANY($1) -AND c.relkind = 'r' -ORDER BY pg_total_relation_size(c.oid) DESC` - -const UNUSED_INDEXES_QUERY = `SELECT - schemaname || '.' || relname AS table, - indexrelname AS index, - pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size, - idx_scan as index_scans -FROM pg_stat_user_indexes ui -JOIN pg_index i ON ui.indexrelid = i.indexrelid -WHERE - NOT indisunique AND idx_scan < 50 AND pg_relation_size(relid) > 5 * 8192 - AND NOT schemaname LIKE ANY($1) -ORDER BY - pg_relation_size(i.indexrelid) / nullif(idx_scan, 0) DESC NULLS FIRST, - pg_relation_size(i.indexrelid) DESC` - -const VACUUM_STATS_QUERY = `WITH table_opts AS ( - SELECT - pg_class.oid, relname, nspname, array_to_string(reloptions, '') AS relopts - FROM - pg_class INNER JOIN pg_namespace ns ON relnamespace = ns.oid -), vacuum_settings AS ( - SELECT - oid, relname, nspname, - CASE - WHEN relopts LIKE '%autovacuum_vacuum_threshold%' - THEN substring(relopts, '.*autovacuum_vacuum_threshold=([0-9.]+).*')::integer - ELSE current_setting('autovacuum_vacuum_threshold')::integer - END AS autovacuum_vacuum_threshold, - CASE - WHEN relopts LIKE '%autovacuum_vacuum_scale_factor%' - THEN substring(relopts, '.*autovacuum_vacuum_scale_factor=([0-9.]+).*')::real - ELSE current_setting('autovacuum_vacuum_scale_factor')::real - END AS autovacuum_vacuum_scale_factor - FROM - table_opts -) -SELECT - vacuum_settings.nspname AS schema, - vacuum_settings.relname AS table, - coalesce(to_char(psut.last_vacuum, 'YYYY-MM-DD HH24:MI'), '') AS last_vacuum, - coalesce(to_char(psut.last_autovacuum, 'YYYY-MM-DD HH24:MI'), '') AS last_autovacuum, - to_char(pg_class.reltuples, '9G999G999G999') AS rowcount, - to_char(psut.n_dead_tup, '9G999G999G999') AS dead_rowcount, - to_char(autovacuum_vacuum_threshold - + (autovacuum_vacuum_scale_factor::numeric * pg_class.reltuples), '9G999G999G999') AS autovacuum_threshold, - CASE - WHEN autovacuum_vacuum_threshold + 
(autovacuum_vacuum_scale_factor::numeric * pg_class.reltuples) < psut.n_dead_tup - THEN 'yes' - ELSE 'no' - END AS expect_autovacuum -FROM - pg_stat_user_tables psut INNER JOIN pg_class ON psut.relid = pg_class.oid -INNER JOIN vacuum_settings ON pg_class.oid = vacuum_settings.oid -WHERE NOT vacuum_settings.nspname LIKE ANY($1) -ORDER BY - case - when pg_class.reltuples = -1 then 1 - else 0 - end, - 1` diff --git a/internal/inspect/replication_slots/replication_slots.go b/internal/inspect/replication_slots/replication_slots.go index b66e7a0d3..4ad5db57a 100644 --- a/internal/inspect/replication_slots/replication_slots.go +++ b/internal/inspect/replication_slots/replication_slots.go @@ -2,18 +2,21 @@ package replication_slots import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed replication_slots.sql +var ReplicationSlotsQuery string + type Result struct { Slot_name string Active bool @@ -27,7 +30,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.REPLICATION_SLOTS_QUERY) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, ReplicationSlotsQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/replication_slots/replication_slots.sql b/internal/inspect/replication_slots/replication_slots.sql new file mode 100644 index 000000000..c45add911 --- /dev/null +++ b/internal/inspect/replication_slots/replication_slots.sql @@ -0,0 +1,11 @@ +SELECT + s.slot_name, + s.active, + COALESCE(r.state, 'N/A') as state, + CASE WHEN r.client_addr IS NULL + THEN 'N/A' + ELSE r.client_addr::text + END replication_client_address, + GREATEST(0, ROUND((redo_lsn-restart_lsn)/1024/1024/1024, 2)) as replication_lag_gb +FROM pg_control_checkpoint(), pg_replication_slots s +LEFT JOIN pg_stat_replication r ON (r.pid = s.active_pid) diff --git a/internal/inspect/replication_slots/replication_slots_test.go b/internal/inspect/replication_slots/replication_slots_test.go new file mode 100644 index 000000000..8ed02f13c --- /dev/null +++ b/internal/inspect/replication_slots/replication_slots_test.go @@ -0,0 +1,41 @@ +package replication_slots + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestReplicationCommand(t *testing.T) { + t.Run("inspects replication slots", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(ReplicationSlotsQuery). 
+ Reply("SELECT 1", Result{ + Slot_name: "test", + Active: true, + State: "active", + Replication_client_address: "127.0.0.1", + Replication_lag_gb: "0.9", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/report.go b/internal/inspect/report.go new file mode 100644 index 000000000..de721d954 --- /dev/null +++ b/internal/inspect/report.go @@ -0,0 +1,79 @@ +package inspect + +import ( + "context" + "embed" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "time" + + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/utils" +) + +//go:embed **/*.sql +var queries embed.FS + +func Report(ctx context.Context, out string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { + date := time.Now().Format("2006-01-02") + if err := utils.MkdirIfNotExistFS(fsys, out); err != nil { + return err + } + conn, err := utils.ConnectByConfig(ctx, config, options...) + if err != nil { + return err + } + defer conn.Close(context.Background()) + fmt.Fprintln(os.Stderr, "Running queries...") + if err := fs.WalkDir(queries, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return errors.Errorf("failed to walk queries: %w", err) + } + if d.IsDir() { + return nil + } + query, err := queries.ReadFile(path) + if err != nil { + return errors.Errorf("failed to read query: %w", err) + } + name := strings.Split(d.Name(), ".")[0] + outPath := filepath.Join(out, fmt.Sprintf("%s_%s.csv", name, date)) + return copyToCSV(ctx, string(query), outPath, conn.PgConn(), fsys) + }); err != nil { + return err + } + if !filepath.IsAbs(out) { + out, _ = filepath.Abs(out) + } + fmt.Fprintln(os.Stderr, "Reports saved to "+utils.Bold(out)) + return nil +} + +func copyToCSV(ctx context.Context, query, outPath string, conn *pgconn.PgConn, fsys afero.Fs) error { + // Create output file + f, err := fsys.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return errors.Errorf("failed to create output file: %w", err) + } + defer f.Close() + // Execute query + csvQuery := wrapQuery(query) + if _, err = conn.CopyTo(ctx, f, csvQuery); err != nil { + return errors.Errorf("failed to copy output: %w", err) + } + return nil +} + +var ignoreSchemas = fmt.Sprintf("'{%s}'::text[]", strings.Join(reset.LikeEscapeSchema(utils.InternalSchemas), ",")) + +func wrapQuery(query string) string { + fullQuery := strings.ReplaceAll(query, "$1", ignoreSchemas) + return fmt.Sprintf("COPY (%s) TO STDOUT WITH CSV HEADER", fullQuery) +} diff --git a/internal/inspect/report_test.go b/internal/inspect/report_test.go new file mode 100644 index 000000000..d019976b8 --- /dev/null +++ b/internal/inspect/report_test.go @@ -0,0 +1,110 @@ +package inspect + +import ( + "context" + "fmt" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/inspect/bloat" + "github.com/supabase/cli/internal/inspect/blocking" + "github.com/supabase/cli/internal/inspect/cache" + "github.com/supabase/cli/internal/inspect/calls" + "github.com/supabase/cli/internal/inspect/index_sizes" + "github.com/supabase/cli/internal/inspect/index_usage" + "github.com/supabase/cli/internal/inspect/locks" + "github.com/supabase/cli/internal/inspect/long_running_queries" + 
"github.com/supabase/cli/internal/inspect/outliers" + "github.com/supabase/cli/internal/inspect/replication_slots" + "github.com/supabase/cli/internal/inspect/role_connections" + "github.com/supabase/cli/internal/inspect/seq_scans" + "github.com/supabase/cli/internal/inspect/table_index_sizes" + "github.com/supabase/cli/internal/inspect/table_record_counts" + "github.com/supabase/cli/internal/inspect/table_sizes" + "github.com/supabase/cli/internal/inspect/total_index_size" + "github.com/supabase/cli/internal/inspect/total_table_sizes" + "github.com/supabase/cli/internal/inspect/unused_indexes" + "github.com/supabase/cli/internal/inspect/vacuum_stats" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestReportCommand(t *testing.T) { + t.Run("runs all queries", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(wrapQuery(bloat.BloatQuery)). + Reply("COPY 0"). + Query(wrapQuery(blocking.BlockingQuery)). + Reply("COPY 0"). + Query(wrapQuery(cache.CacheQuery)). + Reply("COPY 0"). + Query(wrapQuery(calls.CallsQuery)). + Reply("COPY 0"). + Query(wrapQuery(index_sizes.IndexSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(index_usage.IndexUsageQuery)). + Reply("COPY 0"). + Query(wrapQuery(locks.LocksQuery)). + Reply("COPY 0"). + Query(wrapQuery(long_running_queries.LongRunningQueriesQuery)). + Reply("COPY 0"). + Query(wrapQuery(outliers.OutliersQuery)). + Reply("COPY 0"). + Query(wrapQuery(replication_slots.ReplicationSlotsQuery)). + Reply("COPY 0"). + Query(wrapQuery(role_connections.RoleConnectionsQuery)). + Reply("COPY 0"). + Query(wrapQuery(seq_scans.SeqScansQuery)). + Reply("COPY 0"). + Query(wrapQuery(table_index_sizes.TableIndexSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(table_record_counts.TableRecordCountsQuery)). + Reply("COPY 0"). + Query(wrapQuery(table_sizes.TableSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(total_index_size.TotalIndexSizeQuery)). + Reply("COPY 0"). + Query(wrapQuery(total_table_sizes.TotalTableSizesQuery)). + Reply("COPY 0"). + Query(wrapQuery(unused_indexes.UnusedIndexesQuery)). + Reply("COPY 0"). + Query(wrapQuery(vacuum_stats.VacuumStatsQuery)). 
+ Reply("COPY 0") + // Run test + err := Report(context.Background(), ".", dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + matches, err := afero.Glob(fsys, "*.csv") + assert.NoError(t, err) + assert.Len(t, matches, 19) + }) +} + +func TestWrapQuery(t *testing.T) { + t.Run("wraps query in csv", func(t *testing.T) { + assert.Equal(t, + "COPY (SELECT 1) TO STDOUT WITH CSV HEADER", + wrapQuery("SELECT 1"), + ) + }) + + t.Run("replaces placeholder value", func(t *testing.T) { + assert.Equal(t, + fmt.Sprintf("COPY (SELECT 'a' LIKE ANY(%s)) TO STDOUT WITH CSV HEADER", ignoreSchemas), + wrapQuery("SELECT 'a' LIKE ANY($1)"), + ) + }) +} diff --git a/internal/inspect/role_connections/role_connections.go b/internal/inspect/role_connections/role_connections.go index b0a2aa1fb..25a7ff32a 100644 --- a/internal/inspect/role_connections/role_connections.go +++ b/internal/inspect/role_connections/role_connections.go @@ -2,18 +2,21 @@ package role_connections import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed role_connections.sql +var RoleConnectionsQuery string + type Result struct { Rolname string Active_connections int @@ -25,7 +28,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.ROLE_CONNECTIONS_QUERY) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, RoleConnectionsQuery) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/role_connections/role_connections.sql b/internal/inspect/role_connections/role_connections.sql new file mode 100644 index 000000000..40b103669 --- /dev/null +++ b/internal/inspect/role_connections/role_connections.sql @@ -0,0 +1,16 @@ +SELECT + rolname, + ( + SELECT + count(*) + FROM + pg_stat_activity + WHERE + pg_roles.rolname = pg_stat_activity.usename + ) AS active_connections, + CASE WHEN rolconnlimit = -1 + THEN current_setting('max_connections')::int8 + ELSE rolconnlimit + END AS connection_limit +FROM pg_roles +ORDER BY 2 DESC diff --git a/internal/inspect/role_connections/role_connections_test.go b/internal/inspect/role_connections/role_connections_test.go new file mode 100644 index 000000000..79c153558 --- /dev/null +++ b/internal/inspect/role_connections/role_connections_test.go @@ -0,0 +1,39 @@ +package role_connections + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/testing/pgtest" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestRoleCommand(t *testing.T) { + t.Run("inspects role connections", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(RoleConnectionsQuery). 
+ Reply("SELECT 1", Result{ + Rolname: "postgres", + Active_connections: 1, + Connection_limit: 10, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/seq_scans/seq_scans.go b/internal/inspect/seq_scans/seq_scans.go index 3cb4cf553..049111665 100644 --- a/internal/inspect/seq_scans/seq_scans.go +++ b/internal/inspect/seq_scans/seq_scans.go @@ -2,6 +2,7 @@ package seq_scans import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed seq_scans.sql +var SeqScansQuery string + type Result struct { Name string Count int64 @@ -25,7 +28,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.SEQ_SCANS_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, SeqScansQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/seq_scans/seq_scans.sql b/internal/inspect/seq_scans/seq_scans.sql new file mode 100644 index 000000000..c8edfc8e3 --- /dev/null +++ b/internal/inspect/seq_scans/seq_scans.sql @@ -0,0 +1,6 @@ +SELECT + schemaname || '.' || relname AS name, + seq_scan as count +FROM pg_stat_user_tables +WHERE NOT schemaname LIKE ANY($1) +ORDER BY seq_scan DESC diff --git a/internal/inspect/seq_scans/seq_scans_test.go b/internal/inspect/seq_scans/seq_scans_test.go new file mode 100644 index 000000000..3bfb2ba61 --- /dev/null +++ b/internal/inspect/seq_scans/seq_scans_test.go @@ -0,0 +1,40 @@ +package seq_scans + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestSequentialScansCommand(t *testing.T) { + t.Run("inspects sequential scans", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(SeqScansQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). 
+ Reply("SELECT 1", Result{ + Name: "test_table", + Count: 99999, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/table_index_sizes/table_index_sizes.go b/internal/inspect/table_index_sizes/table_index_sizes.go index 905ad3b5a..420f3fdfe 100644 --- a/internal/inspect/table_index_sizes/table_index_sizes.go +++ b/internal/inspect/table_index_sizes/table_index_sizes.go @@ -2,6 +2,7 @@ package table_index_sizes import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed table_index_sizes.sql +var TableIndexSizesQuery string + type Result struct { Table string Index_size string @@ -25,7 +28,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.TABLE_INDEX_SIZES_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TableIndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/table_index_sizes/table_index_sizes.sql b/internal/inspect/table_index_sizes/table_index_sizes.sql new file mode 100644 index 000000000..0c9bc6bfc --- /dev/null +++ b/internal/inspect/table_index_sizes/table_index_sizes.sql @@ -0,0 +1,8 @@ +SELECT + n.nspname || '.' || c.relname AS table, + pg_size_pretty(pg_indexes_size(c.oid)) AS index_size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'r' +ORDER BY pg_indexes_size(c.oid) DESC diff --git a/internal/inspect/table_index_sizes/table_index_sizes_test.go b/internal/inspect/table_index_sizes/table_index_sizes_test.go new file mode 100644 index 000000000..0a58a7107 --- /dev/null +++ b/internal/inspect/table_index_sizes/table_index_sizes_test.go @@ -0,0 +1,40 @@ +package table_index_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTableIndexSizesCommand(t *testing.T) { + t.Run("inspects table index sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TableIndexSizesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). 
+ Reply("SELECT 1", Result{ + Table: "public.test_table", + Index_size: "3GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/table_record_counts/table_record_counts.go b/internal/inspect/table_record_counts/table_record_counts.go index 72ca70de4..7d65b918b 100644 --- a/internal/inspect/table_record_counts/table_record_counts.go +++ b/internal/inspect/table_record_counts/table_record_counts.go @@ -2,6 +2,7 @@ package table_record_counts import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed table_record_counts.sql +var TableRecordCountsQuery string + type Result struct { Schema string Name string @@ -26,7 +29,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.TABLE_RECORD_COUNTS_QUERY, reset.LikeEscapeSchema(utils.PgSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TableRecordCountsQuery, reset.LikeEscapeSchema(utils.PgSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/table_record_counts/table_record_counts.sql b/internal/inspect/table_record_counts/table_record_counts.sql new file mode 100644 index 000000000..1b24f04a4 --- /dev/null +++ b/internal/inspect/table_record_counts/table_record_counts.sql @@ -0,0 +1,7 @@ +SELECT + schemaname AS schema, + relname AS name, + n_live_tup AS estimated_count +FROM pg_stat_user_tables +WHERE NOT schemaname LIKE ANY($1) +ORDER BY n_live_tup DESC diff --git a/internal/inspect/table_record_counts/table_record_counts_test.go b/internal/inspect/table_record_counts/table_record_counts_test.go new file mode 100644 index 000000000..c5f976833 --- /dev/null +++ b/internal/inspect/table_record_counts/table_record_counts_test.go @@ -0,0 +1,41 @@ +package table_record_counts + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTableRecordCountsCommand(t *testing.T) { + t.Run("inspects table record counts", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TableRecordCountsQuery, reset.LikeEscapeSchema(utils.PgSchemas)). 
+ Reply("SELECT 1", Result{ + Schema: "public", + Name: "test_table", + Estimated_count: 100, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/table_sizes/table_sizes.go b/internal/inspect/table_sizes/table_sizes.go index 74c0396d3..651b39642 100644 --- a/internal/inspect/table_sizes/table_sizes.go +++ b/internal/inspect/table_sizes/table_sizes.go @@ -2,6 +2,7 @@ package table_sizes import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed table_sizes.sql +var TableSizesQuery string + type Result struct { Schema string Name string @@ -26,7 +29,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.TABLE_SIZES_QUERY, reset.LikeEscapeSchema(utils.PgSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/table_sizes/table_sizes.sql b/internal/inspect/table_sizes/table_sizes.sql new file mode 100644 index 000000000..2c8fb3064 --- /dev/null +++ b/internal/inspect/table_sizes/table_sizes.sql @@ -0,0 +1,9 @@ +SELECT + n.nspname AS schema, + c.relname AS name, + pg_size_pretty(pg_table_size(c.oid)) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'r' +ORDER BY pg_table_size(c.oid) DESC diff --git a/internal/inspect/table_sizes/table_sizes_test.go b/internal/inspect/table_sizes/table_sizes_test.go new file mode 100644 index 000000000..f96e17e1b --- /dev/null +++ b/internal/inspect/table_sizes/table_sizes_test.go @@ -0,0 +1,41 @@ +package table_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTableSizesCommand(t *testing.T) { + t.Run("inspects table sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)). 
+ Reply("SELECT 1", Result{ + Schema: "schema", + Name: "test_table", + Size: "3GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/total_index_size/total_index_size.go b/internal/inspect/total_index_size/total_index_size.go index 73ee9e672..b5b1b61bb 100644 --- a/internal/inspect/total_index_size/total_index_size.go +++ b/internal/inspect/total_index_size/total_index_size.go @@ -2,6 +2,7 @@ package total_index_size import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed total_index_size.sql +var TotalIndexSizeQuery string + type Result struct { Size string } @@ -24,7 +27,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.TOTAL_INDEX_SIZE_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TotalIndexSizeQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/total_index_size/total_index_size.sql b/internal/inspect/total_index_size/total_index_size.sql new file mode 100644 index 000000000..d1e8ab3d8 --- /dev/null +++ b/internal/inspect/total_index_size/total_index_size.sql @@ -0,0 +1,6 @@ +SELECT + pg_size_pretty(sum(c.relpages::bigint*8192)::bigint) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'i' diff --git a/internal/inspect/total_index_size/total_index_size_test.go b/internal/inspect/total_index_size/total_index_size_test.go new file mode 100644 index 000000000..6a6591f26 --- /dev/null +++ b/internal/inspect/total_index_size/total_index_size_test.go @@ -0,0 +1,39 @@ +package total_index_size + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTotalIndexSizeCommand(t *testing.T) { + t.Run("inspects size of all indexes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TotalIndexSizeQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). 
+ Reply("SELECT 1", Result{ + Size: "8GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/total_table_sizes/total_table_sizes.go b/internal/inspect/total_table_sizes/total_table_sizes.go index 8fd441952..2a11f7040 100644 --- a/internal/inspect/total_table_sizes/total_table_sizes.go +++ b/internal/inspect/total_table_sizes/total_table_sizes.go @@ -2,6 +2,7 @@ package total_table_sizes import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed total_table_sizes.sql +var TotalTableSizesQuery string + type Result struct { Schema string Name string @@ -26,7 +29,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.TOTAL_TABLE_SIZES_QUERY, reset.LikeEscapeSchema(utils.PgSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, TotalTableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/total_table_sizes/total_table_sizes.sql b/internal/inspect/total_table_sizes/total_table_sizes.sql new file mode 100644 index 000000000..471d0655f --- /dev/null +++ b/internal/inspect/total_table_sizes/total_table_sizes.sql @@ -0,0 +1,9 @@ +SELECT + n.nspname AS schema, + c.relname AS name, + pg_size_pretty(pg_total_relation_size(c.oid)) AS size +FROM pg_class c +LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) +WHERE NOT n.nspname LIKE ANY($1) +AND c.relkind = 'r' +ORDER BY pg_total_relation_size(c.oid) DESC diff --git a/internal/inspect/total_table_sizes/total_table_sizes_test.go b/internal/inspect/total_table_sizes/total_table_sizes_test.go new file mode 100644 index 000000000..a207456cc --- /dev/null +++ b/internal/inspect/total_table_sizes/total_table_sizes_test.go @@ -0,0 +1,41 @@ +package total_table_sizes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestTotalTableSizesCommand(t *testing.T) { + t.Run("inspects total table sizes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(TotalTableSizesQuery, reset.LikeEscapeSchema(utils.PgSchemas)). 
+ Reply("SELECT 1", Result{ + Schema: "public", + Name: "test_table", + Size: "3GB", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/unused_indexes/unused_indexes.go b/internal/inspect/unused_indexes/unused_indexes.go index ca0c4feff..cf429ed5b 100644 --- a/internal/inspect/unused_indexes/unused_indexes.go +++ b/internal/inspect/unused_indexes/unused_indexes.go @@ -2,6 +2,7 @@ package unused_indexes import ( "context" + _ "embed" "fmt" "github.com/go-errors/errors" @@ -9,12 +10,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed unused_indexes.sql +var UnusedIndexesQuery string + type Result struct { Table string Index string @@ -27,7 +30,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.UNUSED_INDEXES_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, UnusedIndexesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/unused_indexes/unused_indexes.sql b/internal/inspect/unused_indexes/unused_indexes.sql new file mode 100644 index 000000000..6e775967d --- /dev/null +++ b/internal/inspect/unused_indexes/unused_indexes.sql @@ -0,0 +1,13 @@ +SELECT + schemaname || '.' || relname AS table, + indexrelname AS index, + pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size, + idx_scan as index_scans +FROM pg_stat_user_indexes ui +JOIN pg_index i ON ui.indexrelid = i.indexrelid +WHERE + NOT indisunique AND idx_scan < 50 AND pg_relation_size(relid) > 5 * 8192 + AND NOT schemaname LIKE ANY($1) +ORDER BY + pg_relation_size(i.indexrelid) / nullif(idx_scan, 0) DESC NULLS FIRST, + pg_relation_size(i.indexrelid) DESC diff --git a/internal/inspect/unused_indexes/unused_indexes_test.go b/internal/inspect/unused_indexes/unused_indexes_test.go new file mode 100644 index 000000000..d295c164b --- /dev/null +++ b/internal/inspect/unused_indexes/unused_indexes_test.go @@ -0,0 +1,42 @@ +package unused_indexes + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestUnusedIndexesCommand(t *testing.T) { + t.Run("inspects unused indexes", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(UnusedIndexesQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). 
+ Reply("SELECT 1", Result{ + Table: "test_table", + Index: "test_table_idx", + Index_size: "3GB", + Index_scans: 2, + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +} diff --git a/internal/inspect/vacuum_stats/vacuum_stats.go b/internal/inspect/vacuum_stats/vacuum_stats.go index cc98b6b06..f8d2c2221 100644 --- a/internal/inspect/vacuum_stats/vacuum_stats.go +++ b/internal/inspect/vacuum_stats/vacuum_stats.go @@ -2,6 +2,7 @@ package vacuum_stats import ( "context" + _ "embed" "fmt" "strings" @@ -10,12 +11,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/db/reset" - "github.com/supabase/cli/internal/inspect" "github.com/supabase/cli/internal/migration/list" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/pgxv5" ) +//go:embed vacuum_stats.sql +var VacuumStatsQuery string + type Result struct { Schema string Table string @@ -32,7 +35,8 @@ func Run(ctx context.Context, config pgconn.Config, fsys afero.Fs, options ...fu if err != nil { return err } - rows, err := conn.Query(ctx, inspect.VACUUM_STATS_QUERY, reset.LikeEscapeSchema(utils.InternalSchemas)) + defer conn.Close(context.Background()) + rows, err := conn.Query(ctx, VacuumStatsQuery, reset.LikeEscapeSchema(utils.InternalSchemas)) if err != nil { return errors.Errorf("failed to query rows: %w", err) } diff --git a/internal/inspect/vacuum_stats/vacuum_stats.sql b/internal/inspect/vacuum_stats/vacuum_stats.sql new file mode 100644 index 000000000..707847497 --- /dev/null +++ b/internal/inspect/vacuum_stats/vacuum_stats.sql @@ -0,0 +1,45 @@ +WITH table_opts AS ( + SELECT + pg_class.oid, relname, nspname, array_to_string(reloptions, '') AS relopts + FROM + pg_class INNER JOIN pg_namespace ns ON relnamespace = ns.oid +), vacuum_settings AS ( + SELECT + oid, relname, nspname, + CASE + WHEN relopts LIKE '%autovacuum_vacuum_threshold%' + THEN substring(relopts, '.*autovacuum_vacuum_threshold=([0-9.]+).*')::integer + ELSE current_setting('autovacuum_vacuum_threshold')::integer + END AS autovacuum_vacuum_threshold, + CASE + WHEN relopts LIKE '%autovacuum_vacuum_scale_factor%' + THEN substring(relopts, '.*autovacuum_vacuum_scale_factor=([0-9.]+).*')::real + ELSE current_setting('autovacuum_vacuum_scale_factor')::real + END AS autovacuum_vacuum_scale_factor + FROM + table_opts +) +SELECT + vacuum_settings.nspname AS schema, + vacuum_settings.relname AS table, + coalesce(to_char(psut.last_vacuum, 'YYYY-MM-DD HH24:MI'), '') AS last_vacuum, + coalesce(to_char(psut.last_autovacuum, 'YYYY-MM-DD HH24:MI'), '') AS last_autovacuum, + to_char(pg_class.reltuples, '9G999G999G999') AS rowcount, + to_char(psut.n_dead_tup, '9G999G999G999') AS dead_rowcount, + to_char(autovacuum_vacuum_threshold + + (autovacuum_vacuum_scale_factor::numeric * pg_class.reltuples), '9G999G999G999') AS autovacuum_threshold, + CASE + WHEN autovacuum_vacuum_threshold + (autovacuum_vacuum_scale_factor::numeric * pg_class.reltuples) < psut.n_dead_tup + THEN 'yes' + ELSE 'no' + END AS expect_autovacuum +FROM + pg_stat_user_tables psut INNER JOIN pg_class ON psut.relid = pg_class.oid +INNER JOIN vacuum_settings ON pg_class.oid = vacuum_settings.oid +WHERE NOT vacuum_settings.nspname LIKE ANY($1) +ORDER BY + case + when pg_class.reltuples = -1 then 1 + else 0 + end, + 1 diff --git a/internal/inspect/vacuum_stats/vacuum_stats_test.go b/internal/inspect/vacuum_stats/vacuum_stats_test.go new file mode 100644 index 
000000000..3e1b93f33 --- /dev/null +++ b/internal/inspect/vacuum_stats/vacuum_stats_test.go @@ -0,0 +1,46 @@ +package vacuum_stats + +import ( + "context" + "testing" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/supabase/cli/internal/db/reset" + "github.com/supabase/cli/internal/testing/pgtest" + "github.com/supabase/cli/internal/utils" +) + +var dbConfig = pgconn.Config{ + Host: "127.0.0.1", + Port: 5432, + User: "admin", + Password: "password", + Database: "postgres", +} + +func TestVacuumCommand(t *testing.T) { + t.Run("inspects vacuum stats", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(VacuumStatsQuery, reset.LikeEscapeSchema(utils.InternalSchemas)). + Reply("SELECT 1", Result{ + Schema: "test_schema", + Table: "test_table", + Last_vacuum: "2021-01-01 00:00:00", + Last_autovacuum: "2021-01-01 00:00:00", + Rowcount: "1000", + Dead_rowcount: "100", + Autovacuum_threshold: "100", + Expect_autovacuum: "yes", + }) + // Run test + err := Run(context.Background(), dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + }) +}
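
Note on the pattern introduced above: each inspect subcommand now embeds its own SQL script with go:embed, and Report walks the embedded FS, substitutes the $1 schema filter, wraps every statement in COPY ... TO STDOUT WITH CSV HEADER, and streams the server-generated CSV into a dated file. The following is a minimal standalone sketch of that same pattern, not part of the patch: it assumes a hypothetical local queries/ directory of parameter-free SELECT statements and a hypothetical connection string, and it mirrors internal/inspect/report.go only loosely (the real command also replaces $1 with an array literal of schemas to ignore before wrapping).

    // report_sketch.go -- illustrative only; file names and connection string are assumptions.
    package main

    import (
        "context"
        "embed"
        "fmt"
        "io/fs"
        "os"
        "path/filepath"
        "strings"

        "github.com/jackc/pgconn"
    )

    //go:embed queries/*.sql
    var queries embed.FS

    // wrapQuery wraps a SELECT so the server streams its result set back as CSV with a header row,
    // the same COPY wrapping used by the report command.
    func wrapQuery(query string) string {
        return fmt.Sprintf("COPY (%s) TO STDOUT WITH CSV HEADER", query)
    }

    func main() {
        ctx := context.Background()
        conn, err := pgconn.Connect(ctx, "postgres://postgres:postgres@127.0.0.1:5432/postgres")
        if err != nil {
            panic(err)
        }
        defer conn.Close(ctx)
        err = fs.WalkDir(queries, ".", func(path string, d fs.DirEntry, err error) error {
            if err != nil || d.IsDir() {
                return err
            }
            sql, err := queries.ReadFile(path)
            if err != nil {
                return err
            }
            // One CSV file per embedded script, named after the script.
            name := strings.TrimSuffix(d.Name(), filepath.Ext(d.Name()))
            f, err := os.Create(name + ".csv")
            if err != nil {
                return err
            }
            defer f.Close()
            // COPY ... TO STDOUT writes rows straight into the file as CSV.
            _, err = conn.CopyTo(ctx, f, wrapQuery(string(sql)))
            return err
        })
        if err != nil {
            panic(err)
        }
    }

Keeping the CSV encoding on the server side via COPY means the client only streams bytes into a file instead of formatting rows itself, which is why the report command can reuse every embedded inspect query unchanged.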